repository: introduce register_changeset callback...
Joerg Sonnenberger
r47083:0903d6b9 default
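This revision changes `onchangelog()` in `cg1unpacker.apply()` so that each incoming changeset is parsed exactly once via `changelogrevision()`: the parsed entry supplies the `files` list that was previously re-read with `cl.readfiles()`, and the same entry is forwarded to the new `repo.register_changeset(rev, ctx)` callback. As a rough sketch of the callback shape this introduces (the `cachingrepo` holder and `branchseen` listener below are hypothetical illustrations, not part of this change):

    # Sketch only: each changeset added to the repository is announced once,
    # together with its already-parsed changelog entry, so interested caches
    # never have to re-parse it. `cachingrepo` and `branchseen` are invented
    # names for illustration.


    class cachingrepo(object):
        def __init__(self):
            # hypothetical subscriber list; real consumers would be repo caches
            self._listeners = []

        def register_changeset(self, rev, ctx):
            """Notify listeners of one new changeset.

            ``rev`` is the local revision number; ``ctx`` is the parsed
            changelog entry (exposing ``files``, ``user``, ``extra``, ...).
            """
            for cb in self._listeners:
                cb(rev, ctx)


    def branchseen(seen):
        """Return a listener recording which named branches got new commits."""

        def onchangeset(rev, ctx):
            seen.add(ctx.extra.get(b'branch', b'default'))

        return onchangeset

With such a hook in place, the unpacker below only needs to call `repo.register_changeset(rev, ctx)` while applying a changegroup, instead of every cache re-scanning the changelog afterwards.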
@@ -1,1703 +1,1706 b''
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import open

from . import (
    error,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    requirements,
    scmutil,
    util,
)

from .interfaces import repository

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")

LFS_REQUIREMENT = b'lfs'

readexactly = util.readexactly


def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(b">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_(b"invalid chunk length %d") % l)
        return b""
    return readexactly(stream, l - 4)


def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(b">l", length + 4)


def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(b">l", 0)


def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    return chunkheader(len(path)) + path


def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)


class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != b'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(b">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_(b"invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {b'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data; otherwise it
        would block in the sshrepo case because it doesn't know where the
        stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2 ** 20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
    ):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            efilesset = set()
            cgnodes = []

            def ondupchangelog(cl, node):
                if cl.rev(node) < clstart:
                    cgnodes.append(node)

            def onchangelog(cl, node):
-                efilesset.update(cl.readfiles(node))
+                rev = cl.rev(node)
+                ctx = cl.changelogrevision(rev)
+                efilesset.update(ctx.files)
+                repo.register_changeset(rev, ctx)

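The replacement above computes the same file set as the removed `cl.readfiles(node)` call, but from the fully parsed entry, which is then also handed to the new callback. A quick equivalence sketch, assuming a changelog `cl` and an incoming `node` as in the surrounding code:

    rev = cl.rev(node)
    ctx = cl.changelogrevision(rev)
    # ctx.files is the list readfiles() would have extracted directly
    assert set(ctx.files) == set(cl.readfiles(node))
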
            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in pycompat.iteritems(mfest):
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles
            )

            # making sure the value exists
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # Some code uses bundle operations for internal purposes. It
            # usually sets `ui.quiet` to do this outside of user sight. Since
            # the report of such operations now happens at the end of the
            # transaction, ui.quiet has no direct effect on the output.
            #
            # To preserve this intent we use an inelegant hack: we fail to
            # report the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the
            # "end of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            added = pycompat.xrange(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use `added` here but the list of all changes
                # in the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes, revs=added)
                cgnodes = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]


class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags


class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                raise error.Abort(_(b"received dir revlog group is empty"))


class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)


def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data


def _sortnodesellipsis(store, nodes, cl, lookup):
    """Sort nodes for changegroup generation."""
    # Ellipses serving mode.
    #
    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    key = lambda n: cl.rev(lookup(n))
    return sorted(nodes, key=key)


def _resolvenarrowrevisioninfo(
    cl,
    store,
    ischangelog,
    rev,
    linkrev,
    linknode,
    clrevtolocalrev,
    fullclnodes,
    precomputedellipsis,
):
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend(
                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
                )
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    b'unable to resolve parent while packing %r %r'
                    b' for changeset %r' % (store.indexfile, rev, clrev)
                )

        return nullrev

    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        (p1,) = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode


def deltagroup(
    repo,
    store,
    nodes,
    ischangelog,
    lookup,
    forcedeltaparentprev,
    topic=None,
    ellipses=False,
    clrevtolocalrev=None,
    fullclnodes=None,
    precomputedellipsis=None,
):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = b'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = b'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl,
                    store,
                    ischangelog,
                    rev,
                    linkrev,
                    linknode,
                    clrevtolocalrev,
                    fullclnodes,
                    precomputedellipsis,
                )

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(
            topic, unit=_(b'chunks'), total=len(nodes)
        )

    configtarget = repo.ui.config(b'devel', b'bundle.delta')
    if configtarget not in (b'', b'p1', b'full'):
        msg = _("""config "devel.bundle.delta" has an unknown value: %s""")
        repo.ui.warn(msg % configtarget)
802
805
803 deltamode = repository.CG_DELTAMODE_STD
806 deltamode = repository.CG_DELTAMODE_STD
804 if forcedeltaparentprev:
807 if forcedeltaparentprev:
805 deltamode = repository.CG_DELTAMODE_PREV
808 deltamode = repository.CG_DELTAMODE_PREV
806 elif configtarget == b'p1':
809 elif configtarget == b'p1':
807 deltamode = repository.CG_DELTAMODE_P1
810 deltamode = repository.CG_DELTAMODE_P1
808 elif configtarget == b'full':
811 elif configtarget == b'full':
809 deltamode = repository.CG_DELTAMODE_FULL
812 deltamode = repository.CG_DELTAMODE_FULL
810
813
811 revisions = store.emitrevisions(
814 revisions = store.emitrevisions(
812 nodes,
815 nodes,
813 nodesorder=nodesorder,
816 nodesorder=nodesorder,
814 revisiondata=True,
817 revisiondata=True,
815 assumehaveparentrevisions=not ellipses,
818 assumehaveparentrevisions=not ellipses,
816 deltamode=deltamode,
819 deltamode=deltamode,
817 )
820 )
818
821
819 for i, revision in enumerate(revisions):
822 for i, revision in enumerate(revisions):
820 if progress:
823 if progress:
821 progress.update(i + 1)
824 progress.update(i + 1)
822
825
823 if ellipses:
826 if ellipses:
824 linknode = linknodes[revision.node]
827 linknode = linknodes[revision.node]
825
828
826 if revision.node in adjustedparents:
829 if revision.node in adjustedparents:
827 p1node, p2node = adjustedparents[revision.node]
830 p1node, p2node = adjustedparents[revision.node]
828 revision.p1node = p1node
831 revision.p1node = p1node
829 revision.p2node = p2node
832 revision.p2node = p2node
830 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
833 revision.flags |= repository.REVISION_FLAG_ELLIPSIS
831
834
832 else:
835 else:
833 linknode = lookup(revision.node)
836 linknode = lookup(revision.node)
834
837
835 revision.linknode = linknode
838 revision.linknode = linknode
836 yield revision
839 yield revision
837
840
838 if progress:
841 if progress:
839 progress.complete()
842 progress.complete()
840
843
841
844
842 class cgpacker(object):
845 class cgpacker(object):
843 def __init__(
846 def __init__(
844 self,
847 self,
845 repo,
848 repo,
846 oldmatcher,
849 oldmatcher,
847 matcher,
850 matcher,
848 version,
851 version,
849 builddeltaheader,
852 builddeltaheader,
850 manifestsend,
853 manifestsend,
851 forcedeltaparentprev=False,
854 forcedeltaparentprev=False,
852 bundlecaps=None,
855 bundlecaps=None,
853 ellipses=False,
856 ellipses=False,
854 shallow=False,
857 shallow=False,
855 ellipsisroots=None,
858 ellipsisroots=None,
856 fullnodes=None,
859 fullnodes=None,
857 ):
860 ):
858 """Given a source repo, construct a bundler.
861 """Given a source repo, construct a bundler.
859
862
860 oldmatcher is a matcher that matches on files the client already has.
863 oldmatcher is a matcher that matches on files the client already has.
861 These will not be included in the changegroup.
864 These will not be included in the changegroup.
862
865
863 matcher is a matcher that matches on files to include in the
866 matcher is a matcher that matches on files to include in the
864 changegroup. Used to facilitate sparse changegroups.
867 changegroup. Used to facilitate sparse changegroups.
865
868
866 forcedeltaparentprev indicates whether delta parents must be against
869 forcedeltaparentprev indicates whether delta parents must be against
867 the previous revision in a delta group. This should only be used for
870 the previous revision in a delta group. This should only be used for
868 compatibility with changegroup version 1.
871 compatibility with changegroup version 1.
869
872
870 builddeltaheader is a callable that constructs the header for a group
873 builddeltaheader is a callable that constructs the header for a group
871 delta.
874 delta.
872
875
873 manifestsend is a chunk to send after manifests have been fully emitted.
876 manifestsend is a chunk to send after manifests have been fully emitted.
874
877
875 ellipses indicates whether ellipsis serving mode is enabled.
878 ellipses indicates whether ellipsis serving mode is enabled.
876
879
877 bundlecaps is optional and can be used to specify the set of
880 bundlecaps is optional and can be used to specify the set of
878 capabilities which can be used to build the bundle. While bundlecaps is
881 capabilities which can be used to build the bundle. While bundlecaps is
879 unused in core Mercurial, extensions rely on this feature to communicate
882 unused in core Mercurial, extensions rely on this feature to communicate
880 capabilities to customize the changegroup packer.
883 capabilities to customize the changegroup packer.
881
884
882 shallow indicates whether shallow data might be sent. The packer may
885 shallow indicates whether shallow data might be sent. The packer may
883 need to pack file contents not introduced by the changes being packed.
886 need to pack file contents not introduced by the changes being packed.
884
887
885 fullnodes is the set of changelog nodes which should not be ellipsis
888 fullnodes is the set of changelog nodes which should not be ellipsis
886 nodes. We store this rather than the set of nodes that should be
889 nodes. We store this rather than the set of nodes that should be
887 ellipsis because for very large histories we expect this to be
890 ellipsis because for very large histories we expect this to be
888 significantly smaller.
891 significantly smaller.
889 """
892 """
890 assert oldmatcher
893 assert oldmatcher
891 assert matcher
894 assert matcher
892 self._oldmatcher = oldmatcher
895 self._oldmatcher = oldmatcher
893 self._matcher = matcher
896 self._matcher = matcher
894
897
895 self.version = version
898 self.version = version
896 self._forcedeltaparentprev = forcedeltaparentprev
899 self._forcedeltaparentprev = forcedeltaparentprev
897 self._builddeltaheader = builddeltaheader
900 self._builddeltaheader = builddeltaheader
898 self._manifestsend = manifestsend
901 self._manifestsend = manifestsend
899 self._ellipses = ellipses
902 self._ellipses = ellipses
900
903
901 # Set of capabilities we can use to build the bundle.
904 # Set of capabilities we can use to build the bundle.
902 if bundlecaps is None:
905 if bundlecaps is None:
903 bundlecaps = set()
906 bundlecaps = set()
904 self._bundlecaps = bundlecaps
907 self._bundlecaps = bundlecaps
905 self._isshallow = shallow
908 self._isshallow = shallow
906 self._fullclnodes = fullnodes
909 self._fullclnodes = fullnodes
907
910
908 # Maps ellipsis revs to their roots at the changelog level.
911 # Maps ellipsis revs to their roots at the changelog level.
909 self._precomputedellipsis = ellipsisroots
912 self._precomputedellipsis = ellipsisroots
910
913
911 self._repo = repo
914 self._repo = repo
912
915
913 if self._repo.ui.verbose and not self._repo.ui.debugflag:
916 if self._repo.ui.verbose and not self._repo.ui.debugflag:
914 self._verbosenote = self._repo.ui.note
917 self._verbosenote = self._repo.ui.note
915 else:
918 else:
916 self._verbosenote = lambda s: None
919 self._verbosenote = lambda s: None
917
920
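A minimal construction sketch (not part of the patch; `repo`, `commonrevs`, and `clnodes` are assumed to exist): cgpacker instances are normally obtained through the getbundler() helper defined later in this module rather than built directly.

    from mercurial import match as matchmod

    packer = getbundler(b'02', repo, matcher=matchmod.always())
    for chunk in packer.generate(commonrevs, clnodes, False, b'pull'):
        consume(chunk)  # hypothetical consumer of the raw bundle bytes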
918 def generate(
921 def generate(
919 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
922 self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
920 ):
923 ):
921 """Yield a sequence of changegroup byte chunks.
924 """Yield a sequence of changegroup byte chunks.
922 If changelog is False, changelog data won't be added to the changegroup.
925 If changelog is False, changelog data won't be added to the changegroup.
923 """
926 """
924
927
925 repo = self._repo
928 repo = self._repo
926 cl = repo.changelog
929 cl = repo.changelog
927
930
928 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
931 self._verbosenote(_(b'uncompressed size of bundle content:\n'))
929 size = 0
932 size = 0
930
933
931 clstate, deltas = self._generatechangelog(
934 clstate, deltas = self._generatechangelog(
932 cl, clnodes, generate=changelog
935 cl, clnodes, generate=changelog
933 )
936 )
934 for delta in deltas:
937 for delta in deltas:
935 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
938 for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
936 size += len(chunk)
939 size += len(chunk)
937 yield chunk
940 yield chunk
938
941
939 close = closechunk()
942 close = closechunk()
940 size += len(close)
943 size += len(close)
941 yield close
944 yield close
942
945
943 self._verbosenote(_(b'%8.i (changelog)\n') % size)
946 self._verbosenote(_(b'%8.i (changelog)\n') % size)
944
947
945 clrevorder = clstate[b'clrevorder']
948 clrevorder = clstate[b'clrevorder']
946 manifests = clstate[b'manifests']
949 manifests = clstate[b'manifests']
947 changedfiles = clstate[b'changedfiles']
950 changedfiles = clstate[b'changedfiles']
948
951
949 # We need to make sure that the linkrev in the changegroup refers to
952 # We need to make sure that the linkrev in the changegroup refers to
950 # the first changeset that introduced the manifest or file revision.
953 # the first changeset that introduced the manifest or file revision.
951 # The fastpath is usually safer than the slowpath, because the filelogs
954 # The fastpath is usually safer than the slowpath, because the filelogs
952 # are walked in revlog order.
955 # are walked in revlog order.
953 #
956 #
954 # When taking the slowpath while the manifest revlog uses generaldelta,
957 # When taking the slowpath while the manifest revlog uses generaldelta,
955 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
958 # the manifest may be walked in the "wrong" order. Without 'clrevorder',
956 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
959 # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
957 #
960 #
958 # When taking the fastpath, we are only vulnerable to reordering
961 # When taking the fastpath, we are only vulnerable to reordering
959 # of the changelog itself. The changelog never uses generaldelta and is
962 # of the changelog itself. The changelog never uses generaldelta and is
960 # never reordered. To handle this case, we simply take the slowpath,
963 # never reordered. To handle this case, we simply take the slowpath,
961 # which already has the 'clrevorder' logic. This was also fixed in
964 # which already has the 'clrevorder' logic. This was also fixed in
962 # cc0ff93d0c0c.
965 # cc0ff93d0c0c.
963
966
964 # Treemanifests don't work correctly with fastpathlinkrev
967 # Treemanifests don't work correctly with fastpathlinkrev
965 # either, because we don't discover which directory nodes to
968 # either, because we don't discover which directory nodes to
966 # send along with files. This could probably be fixed.
969 # send along with files. This could probably be fixed.
967 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
970 fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)
968
971
969 fnodes = {} # needed file nodes
972 fnodes = {} # needed file nodes
970
973
971 size = 0
974 size = 0
972 it = self.generatemanifests(
975 it = self.generatemanifests(
973 commonrevs,
976 commonrevs,
974 clrevorder,
977 clrevorder,
975 fastpathlinkrev,
978 fastpathlinkrev,
976 manifests,
979 manifests,
977 fnodes,
980 fnodes,
978 source,
981 source,
979 clstate[b'clrevtomanifestrev'],
982 clstate[b'clrevtomanifestrev'],
980 )
983 )
981
984
982 for tree, deltas in it:
985 for tree, deltas in it:
983 if tree:
986 if tree:
984 assert self.version == b'03'
987 assert self.version == b'03'
985 chunk = _fileheader(tree)
988 chunk = _fileheader(tree)
986 size += len(chunk)
989 size += len(chunk)
987 yield chunk
990 yield chunk
988
991
989 for delta in deltas:
992 for delta in deltas:
990 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
993 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
991 for chunk in chunks:
994 for chunk in chunks:
992 size += len(chunk)
995 size += len(chunk)
993 yield chunk
996 yield chunk
994
997
995 close = closechunk()
998 close = closechunk()
996 size += len(close)
999 size += len(close)
997 yield close
1000 yield close
998
1001
999 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1002 self._verbosenote(_(b'%8.i (manifests)\n') % size)
1000 yield self._manifestsend
1003 yield self._manifestsend
1001
1004
1002 mfdicts = None
1005 mfdicts = None
1003 if self._ellipses and self._isshallow:
1006 if self._ellipses and self._isshallow:
1004 mfdicts = [
1007 mfdicts = [
1005 (self._repo.manifestlog[n].read(), lr)
1008 (self._repo.manifestlog[n].read(), lr)
1006 for (n, lr) in pycompat.iteritems(manifests)
1009 for (n, lr) in pycompat.iteritems(manifests)
1007 ]
1010 ]
1008
1011
1009 manifests.clear()
1012 manifests.clear()
1010 clrevs = {cl.rev(x) for x in clnodes}
1013 clrevs = {cl.rev(x) for x in clnodes}
1011
1014
1012 it = self.generatefiles(
1015 it = self.generatefiles(
1013 changedfiles,
1016 changedfiles,
1014 commonrevs,
1017 commonrevs,
1015 source,
1018 source,
1016 mfdicts,
1019 mfdicts,
1017 fastpathlinkrev,
1020 fastpathlinkrev,
1018 fnodes,
1021 fnodes,
1019 clrevs,
1022 clrevs,
1020 )
1023 )
1021
1024
1022 for path, deltas in it:
1025 for path, deltas in it:
1023 h = _fileheader(path)
1026 h = _fileheader(path)
1024 size = len(h)
1027 size = len(h)
1025 yield h
1028 yield h
1026
1029
1027 for delta in deltas:
1030 for delta in deltas:
1028 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1031 chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
1029 for chunk in chunks:
1032 for chunk in chunks:
1030 size += len(chunk)
1033 size += len(chunk)
1031 yield chunk
1034 yield chunk
1032
1035
1033 close = closechunk()
1036 close = closechunk()
1034 size += len(close)
1037 size += len(close)
1035 yield close
1038 yield close
1036
1039
1037 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1040 self._verbosenote(_(b'%8.i %s\n') % (size, path))
1038
1041
1039 yield closechunk()
1042 yield closechunk()
1040
1043
1041 if clnodes:
1044 if clnodes:
1042 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1045 repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1043
1046
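A hedged usage sketch for the stream above (variables assumed from the surrounding code): writechunks() from the top of this module can spool the generator straight to a bundle file.

    chunks = packer.generate(commonrevs, clnodes, fastpathlinkrev, b'bundle')
    writechunks(repo.ui, chunks, b'out.hg')  # uncompressed changegroup on disk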
1044 def _generatechangelog(self, cl, nodes, generate=True):
1047 def _generatechangelog(self, cl, nodes, generate=True):
1045 """Generate data for changelog chunks.
1048 """Generate data for changelog chunks.
1046
1049
1047 Returns a 2-tuple of a dict containing state and an iterable of
1050 Returns a 2-tuple of a dict containing state and an iterable of
1048 byte chunks. The state will not be fully populated until the
1051 byte chunks. The state will not be fully populated until the
1049 chunk stream has been fully consumed.
1052 chunk stream has been fully consumed.
1050
1053
1051 if generate is False, the state will be fully populated and no chunk
1054 if generate is False, the state will be fully populated and no chunk
1052 stream will be yielded
1055 stream will be yielded
1053 """
1056 """
1054 clrevorder = {}
1057 clrevorder = {}
1055 manifests = {}
1058 manifests = {}
1056 mfl = self._repo.manifestlog
1059 mfl = self._repo.manifestlog
1057 changedfiles = set()
1060 changedfiles = set()
1058 clrevtomanifestrev = {}
1061 clrevtomanifestrev = {}
1059
1062
1060 state = {
1063 state = {
1061 b'clrevorder': clrevorder,
1064 b'clrevorder': clrevorder,
1062 b'manifests': manifests,
1065 b'manifests': manifests,
1063 b'changedfiles': changedfiles,
1066 b'changedfiles': changedfiles,
1064 b'clrevtomanifestrev': clrevtomanifestrev,
1067 b'clrevtomanifestrev': clrevtomanifestrev,
1065 }
1068 }
1066
1069
1067 if not (generate or self._ellipses):
1070 if not (generate or self._ellipses):
1068 # sort the nodes in storage order
1071 # sort the nodes in storage order
1069 nodes = sorted(nodes, key=cl.rev)
1072 nodes = sorted(nodes, key=cl.rev)
1070 for node in nodes:
1073 for node in nodes:
1071 c = cl.changelogrevision(node)
1074 c = cl.changelogrevision(node)
1072 clrevorder[node] = len(clrevorder)
1075 clrevorder[node] = len(clrevorder)
1073 # record the first changeset introducing this manifest version
1076 # record the first changeset introducing this manifest version
1074 manifests.setdefault(c.manifest, node)
1077 manifests.setdefault(c.manifest, node)
1075 # Record a complete list of potentially-changed files in
1078 # Record a complete list of potentially-changed files in
1076 # this manifest.
1079 # this manifest.
1077 changedfiles.update(c.files)
1080 changedfiles.update(c.files)
1078
1081
1079 return state, ()
1082 return state, ()
1080
1083
1081 # Callback for the changelog, used to collect changed files and
1084 # Callback for the changelog, used to collect changed files and
1082 # manifest nodes.
1085 # manifest nodes.
1083 # Returns the linkrev node (identity in the changelog case).
1086 # Returns the linkrev node (identity in the changelog case).
1084 def lookupcl(x):
1087 def lookupcl(x):
1085 c = cl.changelogrevision(x)
1088 c = cl.changelogrevision(x)
1086 clrevorder[x] = len(clrevorder)
1089 clrevorder[x] = len(clrevorder)
1087
1090
1088 if self._ellipses:
1091 if self._ellipses:
1089 # Only update manifests if x is going to be sent. Otherwise we
1092 # Only update manifests if x is going to be sent. Otherwise we
1090 # end up with bogus linkrevs specified for manifests and
1093 # end up with bogus linkrevs specified for manifests and
1091 # we skip some manifest nodes that we should otherwise
1094 # we skip some manifest nodes that we should otherwise
1092 # have sent.
1095 # have sent.
1093 if (
1096 if (
1094 x in self._fullclnodes
1097 x in self._fullclnodes
1095 or cl.rev(x) in self._precomputedellipsis
1098 or cl.rev(x) in self._precomputedellipsis
1096 ):
1099 ):
1097
1100
1098 manifestnode = c.manifest
1101 manifestnode = c.manifest
1099 # Record the first changeset introducing this manifest
1102 # Record the first changeset introducing this manifest
1100 # version.
1103 # version.
1101 manifests.setdefault(manifestnode, x)
1104 manifests.setdefault(manifestnode, x)
1102 # Set this narrow-specific dict so we have the lowest
1105 # Set this narrow-specific dict so we have the lowest
1103 # manifest revnum to look up for this cl revnum. (Part of
1106 # manifest revnum to look up for this cl revnum. (Part of
1104 # mapping changelog ellipsis parents to manifest ellipsis
1107 # mapping changelog ellipsis parents to manifest ellipsis
1105 # parents)
1108 # parents)
1106 clrevtomanifestrev.setdefault(
1109 clrevtomanifestrev.setdefault(
1107 cl.rev(x), mfl.rev(manifestnode)
1110 cl.rev(x), mfl.rev(manifestnode)
1108 )
1111 )
1109 # We can't trust the changed files list in the changeset if the
1112 # We can't trust the changed files list in the changeset if the
1110 # client requested a shallow clone.
1113 # client requested a shallow clone.
1111 if self._isshallow:
1114 if self._isshallow:
1112 changedfiles.update(mfl[c.manifest].read().keys())
1115 changedfiles.update(mfl[c.manifest].read().keys())
1113 else:
1116 else:
1114 changedfiles.update(c.files)
1117 changedfiles.update(c.files)
1115 else:
1118 else:
1116 # record the first changeset introducing this manifest version
1119 # record the first changeset introducing this manifest version
1117 manifests.setdefault(c.manifest, x)
1120 manifests.setdefault(c.manifest, x)
1118 # Record a complete list of potentially-changed files in
1121 # Record a complete list of potentially-changed files in
1119 # this manifest.
1122 # this manifest.
1120 changedfiles.update(c.files)
1123 changedfiles.update(c.files)
1121
1124
1122 return x
1125 return x
1123
1126
1124 gen = deltagroup(
1127 gen = deltagroup(
1125 self._repo,
1128 self._repo,
1126 cl,
1129 cl,
1127 nodes,
1130 nodes,
1128 True,
1131 True,
1129 lookupcl,
1132 lookupcl,
1130 self._forcedeltaparentprev,
1133 self._forcedeltaparentprev,
1131 ellipses=self._ellipses,
1134 ellipses=self._ellipses,
1132 topic=_(b'changesets'),
1135 topic=_(b'changesets'),
1133 clrevtolocalrev={},
1136 clrevtolocalrev={},
1134 fullclnodes=self._fullclnodes,
1137 fullclnodes=self._fullclnodes,
1135 precomputedellipsis=self._precomputedellipsis,
1138 precomputedellipsis=self._precomputedellipsis,
1136 )
1139 )
1137
1140
1138 return state, gen
1141 return state, gen
1139
1142
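Because the state dict above is populated lazily, callers must drain the delta generator before trusting it; a sketch (assuming a `packer`, the changelog `cl`, and `clnodes`):

    state, deltas = packer._generatechangelog(cl, clnodes)
    for delta in deltas:
        pass  # side effect: fills clrevorder, manifests, changedfiles
    changedfiles = state[b'changedfiles']  # only now fully populated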
1140 def generatemanifests(
1143 def generatemanifests(
1141 self,
1144 self,
1142 commonrevs,
1145 commonrevs,
1143 clrevorder,
1146 clrevorder,
1144 fastpathlinkrev,
1147 fastpathlinkrev,
1145 manifests,
1148 manifests,
1146 fnodes,
1149 fnodes,
1147 source,
1150 source,
1148 clrevtolocalrev,
1151 clrevtolocalrev,
1149 ):
1152 ):
1150 """Returns an iterator of changegroup chunks containing manifests.
1153 """Returns an iterator of changegroup chunks containing manifests.
1151
1154
1152 `source` is unused here, but is used by extensions like remotefilelog to
1155 `source` is unused here, but is used by extensions like remotefilelog to
1153 change what is sent based on pulls vs pushes, etc.
1156 change what is sent based on pulls vs pushes, etc.
1154 """
1157 """
1155 repo = self._repo
1158 repo = self._repo
1156 mfl = repo.manifestlog
1159 mfl = repo.manifestlog
1157 tmfnodes = {b'': manifests}
1160 tmfnodes = {b'': manifests}
1158
1161
1159 # Callback for the manifest, used to collect linkrevs for filelog
1162 # Callback for the manifest, used to collect linkrevs for filelog
1160 # revisions.
1163 # revisions.
1161 # Returns the linkrev node (collected in lookupcl).
1164 # Returns the linkrev node (collected in lookupcl).
1162 def makelookupmflinknode(tree, nodes):
1165 def makelookupmflinknode(tree, nodes):
1163 if fastpathlinkrev:
1166 if fastpathlinkrev:
1164 assert not tree
1167 assert not tree
1165 return (
1168 return (
1166 manifests.__getitem__
1169 manifests.__getitem__
1167 ) # pytype: disable=unsupported-operands
1170 ) # pytype: disable=unsupported-operands
1168
1171
1169 def lookupmflinknode(x):
1172 def lookupmflinknode(x):
1170 """Callback for looking up the linknode for manifests.
1173 """Callback for looking up the linknode for manifests.
1171
1174
1172 Returns the linkrev node for the specified manifest.
1175 Returns the linkrev node for the specified manifest.
1173
1176
1174 SIDE EFFECT:
1177 SIDE EFFECT:
1175
1178
1176 1) fclnodes gets populated with the list of relevant
1179 1) fclnodes gets populated with the list of relevant
1177 file nodes if we're not using fastpathlinkrev
1180 file nodes if we're not using fastpathlinkrev
1178 2) When treemanifests are in use, collects treemanifest nodes
1181 2) When treemanifests are in use, collects treemanifest nodes
1179 to send
1182 to send
1180
1183
1181 Note that this means manifests must be completely sent to
1184 Note that this means manifests must be completely sent to
1182 the client before you can trust the list of files and
1185 the client before you can trust the list of files and
1183 treemanifests to send.
1186 treemanifests to send.
1184 """
1187 """
1185 clnode = nodes[x]
1188 clnode = nodes[x]
1186 mdata = mfl.get(tree, x).readfast(shallow=True)
1189 mdata = mfl.get(tree, x).readfast(shallow=True)
1187 for p, n, fl in mdata.iterentries():
1190 for p, n, fl in mdata.iterentries():
1188 if fl == b't': # subdirectory manifest
1191 if fl == b't': # subdirectory manifest
1189 subtree = tree + p + b'/'
1192 subtree = tree + p + b'/'
1190 tmfclnodes = tmfnodes.setdefault(subtree, {})
1193 tmfclnodes = tmfnodes.setdefault(subtree, {})
1191 tmfclnode = tmfclnodes.setdefault(n, clnode)
1194 tmfclnode = tmfclnodes.setdefault(n, clnode)
1192 if clrevorder[clnode] < clrevorder[tmfclnode]:
1195 if clrevorder[clnode] < clrevorder[tmfclnode]:
1193 tmfclnodes[n] = clnode
1196 tmfclnodes[n] = clnode
1194 else:
1197 else:
1195 f = tree + p
1198 f = tree + p
1196 fclnodes = fnodes.setdefault(f, {})
1199 fclnodes = fnodes.setdefault(f, {})
1197 fclnode = fclnodes.setdefault(n, clnode)
1200 fclnode = fclnodes.setdefault(n, clnode)
1198 if clrevorder[clnode] < clrevorder[fclnode]:
1201 if clrevorder[clnode] < clrevorder[fclnode]:
1199 fclnodes[n] = clnode
1202 fclnodes[n] = clnode
1200 return clnode
1203 return clnode
1201
1204
1202 return lookupmflinknode
1205 return lookupmflinknode
1203
1206
1204 while tmfnodes:
1207 while tmfnodes:
1205 tree, nodes = tmfnodes.popitem()
1208 tree, nodes = tmfnodes.popitem()
1206
1209
1207 should_visit = self._matcher.visitdir(tree[:-1])
1210 should_visit = self._matcher.visitdir(tree[:-1])
1208 if tree and not should_visit:
1211 if tree and not should_visit:
1209 continue
1212 continue
1210
1213
1211 store = mfl.getstorage(tree)
1214 store = mfl.getstorage(tree)
1212
1215
1213 if not should_visit:
1216 if not should_visit:
1214 # No nodes to send because this directory is out of
1217 # No nodes to send because this directory is out of
1215 # the client's view of the repository (probably
1218 # the client's view of the repository (probably
1216 # because of narrow clones). Do this even for the root
1219 # because of narrow clones). Do this even for the root
1217 # directory (tree=='')
1220 # directory (tree=='')
1218 prunednodes = []
1221 prunednodes = []
1219 else:
1222 else:
1220 # Avoid sending any manifest nodes we can prove the
1223 # Avoid sending any manifest nodes we can prove the
1221 # client already has by checking linkrevs. See the
1224 # client already has by checking linkrevs. See the
1222 # related comment in generatefiles().
1225 # related comment in generatefiles().
1223 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1226 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1224
1227
1225 if tree and not prunednodes:
1228 if tree and not prunednodes:
1226 continue
1229 continue
1227
1230
1228 lookupfn = makelookupmflinknode(tree, nodes)
1231 lookupfn = makelookupmflinknode(tree, nodes)
1229
1232
1230 deltas = deltagroup(
1233 deltas = deltagroup(
1231 self._repo,
1234 self._repo,
1232 store,
1235 store,
1233 prunednodes,
1236 prunednodes,
1234 False,
1237 False,
1235 lookupfn,
1238 lookupfn,
1236 self._forcedeltaparentprev,
1239 self._forcedeltaparentprev,
1237 ellipses=self._ellipses,
1240 ellipses=self._ellipses,
1238 topic=_(b'manifests'),
1241 topic=_(b'manifests'),
1239 clrevtolocalrev=clrevtolocalrev,
1242 clrevtolocalrev=clrevtolocalrev,
1240 fullclnodes=self._fullclnodes,
1243 fullclnodes=self._fullclnodes,
1241 precomputedellipsis=self._precomputedellipsis,
1244 precomputedellipsis=self._precomputedellipsis,
1242 )
1245 )
1243
1246
1244 if not self._oldmatcher.visitdir(store.tree[:-1]):
1247 if not self._oldmatcher.visitdir(store.tree[:-1]):
1245 yield tree, deltas
1248 yield tree, deltas
1246 else:
1249 else:
1247 # 'deltas' is a generator and we need to consume it even if
1250 # 'deltas' is a generator and we need to consume it even if
1248 # we are not going to send it because a side-effect is that
1251 # we are not going to send it because a side-effect is that
1249 # it updates tmfnodes (via lookupfn)
1252 # it updates tmfnodes (via lookupfn)
1250 for d in deltas:
1253 for d in deltas:
1251 pass
1254 pass
1252 if not tree:
1255 if not tree:
1253 yield tree, []
1256 yield tree, []
1254
1257
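The while/popitem loop above is a self-growing worklist: serializing one tree can enqueue subtrees via lookupfn. A standalone sketch of the same pattern (data made up):

    work = {b'': [b'root-node']}
    while work:
        tree, nodes = work.popitem()
        if tree == b'':  # processing a tree may discover deeper trees
            work[b'dir/'] = [b'sub-node']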
1255 def _prunemanifests(self, store, nodes, commonrevs):
1258 def _prunemanifests(self, store, nodes, commonrevs):
1256 if not self._ellipses:
1259 if not self._ellipses:
1257 # In the non-ellipses case, and for large repositories, it is cheaper
1260 # In the non-ellipses case, and for large repositories, it is cheaper
1258 # to avoid calling store.rev and store.linkrev on a lot of nodes than
1261 # to avoid calling store.rev and store.linkrev on a lot of nodes than
1259 # to send some extra data
1262 # to send some extra data
1260 return nodes.copy()
1263 return nodes.copy()
1261 # This is split out as a separate method to allow filtering
1264 # This is split out as a separate method to allow filtering
1262 # commonrevs in extension code.
1265 # commonrevs in extension code.
1263 #
1266 #
1264 # TODO(augie): this shouldn't be required, instead we should
1267 # TODO(augie): this shouldn't be required, instead we should
1265 # make filtering of revisions to send delegated to the store
1268 # make filtering of revisions to send delegated to the store
1266 # layer.
1269 # layer.
1267 frev, flr = store.rev, store.linkrev
1270 frev, flr = store.rev, store.linkrev
1268 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1271 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1269
1272
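Since _prunemanifests() is deliberately split out, an extension could subclass the packer and adjust the common set; a hypothetical sketch (class and attribute names are invented):

    class filteringpacker(cgpacker):
        def _prunemanifests(self, store, nodes, commonrevs):
            # pretend the client also has these revs (invented attribute)
            commonrevs = commonrevs | self._extraknownrevs
            return super(filteringpacker, self)._prunemanifests(
                store, nodes, commonrevs
            )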
1270 # The 'source' parameter is useful for extensions
1273 # The 'source' parameter is useful for extensions
1271 def generatefiles(
1274 def generatefiles(
1272 self,
1275 self,
1273 changedfiles,
1276 changedfiles,
1274 commonrevs,
1277 commonrevs,
1275 source,
1278 source,
1276 mfdicts,
1279 mfdicts,
1277 fastpathlinkrev,
1280 fastpathlinkrev,
1278 fnodes,
1281 fnodes,
1279 clrevs,
1282 clrevs,
1280 ):
1283 ):
1281 changedfiles = [
1284 changedfiles = [
1282 f
1285 f
1283 for f in changedfiles
1286 for f in changedfiles
1284 if self._matcher(f) and not self._oldmatcher(f)
1287 if self._matcher(f) and not self._oldmatcher(f)
1285 ]
1288 ]
1286
1289
1287 if not fastpathlinkrev:
1290 if not fastpathlinkrev:
1288
1291
1289 def normallinknodes(unused, fname):
1292 def normallinknodes(unused, fname):
1290 return fnodes.get(fname, {})
1293 return fnodes.get(fname, {})
1291
1294
1292 else:
1295 else:
1293 cln = self._repo.changelog.node
1296 cln = self._repo.changelog.node
1294
1297
1295 def normallinknodes(store, fname):
1298 def normallinknodes(store, fname):
1296 flinkrev = store.linkrev
1299 flinkrev = store.linkrev
1297 fnode = store.node
1300 fnode = store.node
1298 revs = ((r, flinkrev(r)) for r in store)
1301 revs = ((r, flinkrev(r)) for r in store)
1299 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1302 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1300
1303
1301 clrevtolocalrev = {}
1304 clrevtolocalrev = {}
1302
1305
1303 if self._isshallow:
1306 if self._isshallow:
1304 # In a shallow clone, the linknodes callback needs to also include
1307 # In a shallow clone, the linknodes callback needs to also include
1305 # those file nodes that are in the manifests we sent but weren't
1308 # those file nodes that are in the manifests we sent but weren't
1306 # introduced by those manifests.
1309 # introduced by those manifests.
1307 commonctxs = [self._repo[c] for c in commonrevs]
1310 commonctxs = [self._repo[c] for c in commonrevs]
1308 clrev = self._repo.changelog.rev
1311 clrev = self._repo.changelog.rev
1309
1312
1310 def linknodes(flog, fname):
1313 def linknodes(flog, fname):
1311 for c in commonctxs:
1314 for c in commonctxs:
1312 try:
1315 try:
1313 fnode = c.filenode(fname)
1316 fnode = c.filenode(fname)
1314 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1317 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1315 except error.ManifestLookupError:
1318 except error.ManifestLookupError:
1316 pass
1319 pass
1317 links = normallinknodes(flog, fname)
1320 links = normallinknodes(flog, fname)
1318 if len(links) != len(mfdicts):
1321 if len(links) != len(mfdicts):
1319 for mf, lr in mfdicts:
1322 for mf, lr in mfdicts:
1320 fnode = mf.get(fname, None)
1323 fnode = mf.get(fname, None)
1321 if fnode in links:
1324 if fnode in links:
1322 links[fnode] = min(links[fnode], lr, key=clrev)
1325 links[fnode] = min(links[fnode], lr, key=clrev)
1323 elif fnode:
1326 elif fnode:
1324 links[fnode] = lr
1327 links[fnode] = lr
1325 return links
1328 return links
1326
1329
1327 else:
1330 else:
1328 linknodes = normallinknodes
1331 linknodes = normallinknodes
1329
1332
1330 repo = self._repo
1333 repo = self._repo
1331 progress = repo.ui.makeprogress(
1334 progress = repo.ui.makeprogress(
1332 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1335 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1333 )
1336 )
1334 for i, fname in enumerate(sorted(changedfiles)):
1337 for i, fname in enumerate(sorted(changedfiles)):
1335 filerevlog = repo.file(fname)
1338 filerevlog = repo.file(fname)
1336 if not filerevlog:
1339 if not filerevlog:
1337 raise error.Abort(
1340 raise error.Abort(
1338 _(b"empty or missing file data for %s") % fname
1341 _(b"empty or missing file data for %s") % fname
1339 )
1342 )
1340
1343
1341 clrevtolocalrev.clear()
1344 clrevtolocalrev.clear()
1342
1345
1343 linkrevnodes = linknodes(filerevlog, fname)
1346 linkrevnodes = linknodes(filerevlog, fname)
1344 # Lookup table for filenodes; we collected the linkrev nodes above in
1347 # Lookup table for filenodes; we collected the linkrev nodes above in
1345 # the fastpath case and with lookupmf in the slowpath case.
1348 # the fastpath case and with lookupmf in the slowpath case.
1346 def lookupfilelog(x):
1349 def lookupfilelog(x):
1347 return linkrevnodes[x]
1350 return linkrevnodes[x]
1348
1351
1349 frev, flr = filerevlog.rev, filerevlog.linkrev
1352 frev, flr = filerevlog.rev, filerevlog.linkrev
1350 # Skip sending any filenode we know the client already
1353 # Skip sending any filenode we know the client already
1351 # has. This avoids over-sending files relatively
1354 # has. This avoids over-sending files relatively
1352 # inexpensively, so it's not a problem if we under-filter
1355 # inexpensively, so it's not a problem if we under-filter
1353 # here.
1356 # here.
1354 filenodes = [
1357 filenodes = [
1355 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1358 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1356 ]
1359 ]
1357
1360
1358 if not filenodes:
1361 if not filenodes:
1359 continue
1362 continue
1360
1363
1361 progress.update(i + 1, item=fname)
1364 progress.update(i + 1, item=fname)
1362
1365
1363 deltas = deltagroup(
1366 deltas = deltagroup(
1364 self._repo,
1367 self._repo,
1365 filerevlog,
1368 filerevlog,
1366 filenodes,
1369 filenodes,
1367 False,
1370 False,
1368 lookupfilelog,
1371 lookupfilelog,
1369 self._forcedeltaparentprev,
1372 self._forcedeltaparentprev,
1370 ellipses=self._ellipses,
1373 ellipses=self._ellipses,
1371 clrevtolocalrev=clrevtolocalrev,
1374 clrevtolocalrev=clrevtolocalrev,
1372 fullclnodes=self._fullclnodes,
1375 fullclnodes=self._fullclnodes,
1373 precomputedellipsis=self._precomputedellipsis,
1376 precomputedellipsis=self._precomputedellipsis,
1374 )
1377 )
1375
1378
1376 yield fname, deltas
1379 yield fname, deltas
1377
1380
1378 progress.complete()
1381 progress.complete()
1379
1382
1380
1383
1381 def _makecg1packer(
1384 def _makecg1packer(
1382 repo,
1385 repo,
1383 oldmatcher,
1386 oldmatcher,
1384 matcher,
1387 matcher,
1385 bundlecaps,
1388 bundlecaps,
1386 ellipses=False,
1389 ellipses=False,
1387 shallow=False,
1390 shallow=False,
1388 ellipsisroots=None,
1391 ellipsisroots=None,
1389 fullnodes=None,
1392 fullnodes=None,
1390 ):
1393 ):
1391 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1394 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1392 d.node, d.p1node, d.p2node, d.linknode
1395 d.node, d.p1node, d.p2node, d.linknode
1393 )
1396 )
1394
1397
1395 return cgpacker(
1398 return cgpacker(
1396 repo,
1399 repo,
1397 oldmatcher,
1400 oldmatcher,
1398 matcher,
1401 matcher,
1399 b'01',
1402 b'01',
1400 builddeltaheader=builddeltaheader,
1403 builddeltaheader=builddeltaheader,
1401 manifestsend=b'',
1404 manifestsend=b'',
1402 forcedeltaparentprev=True,
1405 forcedeltaparentprev=True,
1403 bundlecaps=bundlecaps,
1406 bundlecaps=bundlecaps,
1404 ellipses=ellipses,
1407 ellipses=ellipses,
1405 shallow=shallow,
1408 shallow=shallow,
1406 ellipsisroots=ellipsisroots,
1409 ellipsisroots=ellipsisroots,
1407 fullnodes=fullnodes,
1410 fullnodes=fullnodes,
1408 )
1411 )
1409
1412
1410
1413
1411 def _makecg2packer(
1414 def _makecg2packer(
1412 repo,
1415 repo,
1413 oldmatcher,
1416 oldmatcher,
1414 matcher,
1417 matcher,
1415 bundlecaps,
1418 bundlecaps,
1416 ellipses=False,
1419 ellipses=False,
1417 shallow=False,
1420 shallow=False,
1418 ellipsisroots=None,
1421 ellipsisroots=None,
1419 fullnodes=None,
1422 fullnodes=None,
1420 ):
1423 ):
1421 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1424 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1422 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1425 d.node, d.p1node, d.p2node, d.basenode, d.linknode
1423 )
1426 )
1424
1427
1425 return cgpacker(
1428 return cgpacker(
1426 repo,
1429 repo,
1427 oldmatcher,
1430 oldmatcher,
1428 matcher,
1431 matcher,
1429 b'02',
1432 b'02',
1430 builddeltaheader=builddeltaheader,
1433 builddeltaheader=builddeltaheader,
1431 manifestsend=b'',
1434 manifestsend=b'',
1432 bundlecaps=bundlecaps,
1435 bundlecaps=bundlecaps,
1433 ellipses=ellipses,
1436 ellipses=ellipses,
1434 shallow=shallow,
1437 shallow=shallow,
1435 ellipsisroots=ellipsisroots,
1438 ellipsisroots=ellipsisroots,
1436 fullnodes=fullnodes,
1439 fullnodes=fullnodes,
1437 )
1440 )
1438
1441
1439
1442
1440 def _makecg3packer(
1443 def _makecg3packer(
1441 repo,
1444 repo,
1442 oldmatcher,
1445 oldmatcher,
1443 matcher,
1446 matcher,
1444 bundlecaps,
1447 bundlecaps,
1445 ellipses=False,
1448 ellipses=False,
1446 shallow=False,
1449 shallow=False,
1447 ellipsisroots=None,
1450 ellipsisroots=None,
1448 fullnodes=None,
1451 fullnodes=None,
1449 ):
1452 ):
1450 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1453 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1451 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1454 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
1452 )
1455 )
1453
1456
1454 return cgpacker(
1457 return cgpacker(
1455 repo,
1458 repo,
1456 oldmatcher,
1459 oldmatcher,
1457 matcher,
1460 matcher,
1458 b'03',
1461 b'03',
1459 builddeltaheader=builddeltaheader,
1462 builddeltaheader=builddeltaheader,
1460 manifestsend=closechunk(),
1463 manifestsend=closechunk(),
1461 bundlecaps=bundlecaps,
1464 bundlecaps=bundlecaps,
1462 ellipses=ellipses,
1465 ellipses=ellipses,
1463 shallow=shallow,
1466 shallow=shallow,
1464 ellipsisroots=ellipsisroots,
1467 ellipsisroots=ellipsisroots,
1465 fullnodes=fullnodes,
1468 fullnodes=fullnodes,
1466 )
1469 )
1467
1470
1468
1471
1469 _packermap = {
1472 _packermap = {
1470 b'01': (_makecg1packer, cg1unpacker),
1473 b'01': (_makecg1packer, cg1unpacker),
1471 # cg2 adds support for exchanging generaldelta
1474 # cg2 adds support for exchanging generaldelta
1472 b'02': (_makecg2packer, cg2unpacker),
1475 b'02': (_makecg2packer, cg2unpacker),
1473 # cg3 adds support for exchanging revlog flags and treemanifests
1476 # cg3 adds support for exchanging revlog flags and treemanifests
1474 b'03': (_makecg3packer, cg3unpacker),
1477 b'03': (_makecg3packer, cg3unpacker),
1475 }
1478 }
1476
1479
1477
1480
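A sketch of how _packermap drives both directions (only the b'02' entry and the argument order come from this file; `repo` and the file-like `fh` are assumed, and b'UN' meaning "uncompressed" is an assumption):

    makepacker, unpackercls = _packermap[b'02']
    packer = makepacker(repo, matchmod.never(), matchmod.always(), None)
    unbundler = unpackercls(fh, b'UN')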
1478 def allsupportedversions(repo):
1481 def allsupportedversions(repo):
1479 versions = set(_packermap.keys())
1482 versions = set(_packermap.keys())
1480 needv03 = False
1483 needv03 = False
1481 if (
1484 if (
1482 repo.ui.configbool(b'experimental', b'changegroup3')
1485 repo.ui.configbool(b'experimental', b'changegroup3')
1483 or repo.ui.configbool(b'experimental', b'treemanifest')
1486 or repo.ui.configbool(b'experimental', b'treemanifest')
1484 or scmutil.istreemanifest(repo)
1487 or scmutil.istreemanifest(repo)
1485 ):
1488 ):
1486 # we keep version 03 because we need it to exchange treemanifest data
1489 # we keep version 03 because we need it to exchange treemanifest data
1487 #
1490 #
1488 # we also keep versions 01 and 02, because it is possible for a repo to
1491 # we also keep versions 01 and 02, because it is possible for a repo to
1489 # contain both normal and tree manifests at the same time, so using an
1492 # contain both normal and tree manifests at the same time, so using an
1490 # older version to pull data is viable
1493 # older version to pull data is viable
1491 #
1494 #
1492 # (or even to push subset of history)
1495 # (or even to push subset of history)
1493 needv03 = True
1496 needv03 = True
1494 if b'exp-sidedata-flag' in repo.requirements:
1497 if b'exp-sidedata-flag' in repo.requirements:
1495 needv03 = True
1498 needv03 = True
1496 # don't attempt to use 01/02 until we do sidedata cleaning
1499 # don't attempt to use 01/02 until we do sidedata cleaning
1497 versions.discard(b'01')
1500 versions.discard(b'01')
1498 versions.discard(b'02')
1501 versions.discard(b'02')
1499 if not needv03:
1502 if not needv03:
1500 versions.discard(b'03')
1503 versions.discard(b'03')
1501 return versions
1504 return versions
1502
1505
1503
1506
1504 # Changegroup versions that can be applied to the repo
1507 # Changegroup versions that can be applied to the repo
1505 def supportedincomingversions(repo):
1508 def supportedincomingversions(repo):
1506 return allsupportedversions(repo)
1509 return allsupportedversions(repo)
1507
1510
1508
1511
1509 # Changegroup versions that can be created from the repo
1512 # Changegroup versions that can be created from the repo
1510 def supportedoutgoingversions(repo):
1513 def supportedoutgoingversions(repo):
1511 versions = allsupportedversions(repo)
1514 versions = allsupportedversions(repo)
1512 if scmutil.istreemanifest(repo):
1515 if scmutil.istreemanifest(repo):
1513 # Versions 01 and 02 support only flat manifests and it's just too
1516 # Versions 01 and 02 support only flat manifests and it's just too
1514 # expensive to convert between the flat manifest and tree manifest on
1517 # expensive to convert between the flat manifest and tree manifest on
1515 # the fly. Since tree manifests are hashed differently, all of history
1518 # the fly. Since tree manifests are hashed differently, all of history
1516 # would have to be converted. Instead, we simply don't even pretend to
1519 # would have to be converted. Instead, we simply don't even pretend to
1517 # support versions 01 and 02.
1520 # support versions 01 and 02.
1518 versions.discard(b'01')
1521 versions.discard(b'01')
1519 versions.discard(b'02')
1522 versions.discard(b'02')
1520 if requirements.NARROW_REQUIREMENT in repo.requirements:
1523 if requirements.NARROW_REQUIREMENT in repo.requirements:
1521 # Versions 01 and 02 don't support revlog flags, and we need to
1524 # Versions 01 and 02 don't support revlog flags, and we need to
1522 # support that for stripping and unbundling to work.
1525 # support that for stripping and unbundling to work.
1523 versions.discard(b'01')
1526 versions.discard(b'01')
1524 versions.discard(b'02')
1527 versions.discard(b'02')
1525 if LFS_REQUIREMENT in repo.requirements:
1528 if LFS_REQUIREMENT in repo.requirements:
1526 # Versions 01 and 02 don't support revlog flags, and we need to
1529 # Versions 01 and 02 don't support revlog flags, and we need to
1527 # mark LFS entries with REVIDX_EXTSTORED.
1530 # mark LFS entries with REVIDX_EXTSTORED.
1528 versions.discard(b'01')
1531 versions.discard(b'01')
1529 versions.discard(b'02')
1532 versions.discard(b'02')
1530
1533
1531 return versions
1534 return versions
1532
1535
1533
1536
1534 def localversion(repo):
1537 def localversion(repo):
1535 # Finds the best version to use for bundles that are meant to be used
1538 # Finds the best version to use for bundles that are meant to be used
1536 # locally, such as those from strip and shelve, and temporary bundles.
1539 # locally, such as those from strip and shelve, and temporary bundles.
1537 return max(supportedoutgoingversions(repo))
1540 return max(supportedoutgoingversions(repo))
1538
1541
1539
1542
1540 def safeversion(repo):
1543 def safeversion(repo):
1541 # Finds the smallest version that it's safe to assume clients of the repo
1544 # Finds the smallest version that it's safe to assume clients of the repo
1542 # will support. For example, all hg versions that support generaldelta also
1545 # will support. For example, all hg versions that support generaldelta also
1543 # support changegroup 02.
1546 # support changegroup 02.
1544 versions = supportedoutgoingversions(repo)
1547 versions = supportedoutgoingversions(repo)
1545 if b'generaldelta' in repo.requirements:
1548 if b'generaldelta' in repo.requirements:
1546 versions.discard(b'01')
1549 versions.discard(b'01')
1547 assert versions
1550 assert versions
1548 return min(versions)
1551 return min(versions)
1549
1552
1550
1553
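Byte strings compare lexicographically, which is what the max()/min() picks above rely on; for example:

    versions = {b'01', b'02', b'03'}
    assert max(versions) == b'03'  # localversion: richest format
    versions.discard(b'01')        # e.g. a generaldelta repo drops '01'
    assert min(versions) == b'02'  # safeversion: lowest safe version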
1551 def getbundler(
1554 def getbundler(
1552 version,
1555 version,
1553 repo,
1556 repo,
1554 bundlecaps=None,
1557 bundlecaps=None,
1555 oldmatcher=None,
1558 oldmatcher=None,
1556 matcher=None,
1559 matcher=None,
1557 ellipses=False,
1560 ellipses=False,
1558 shallow=False,
1561 shallow=False,
1559 ellipsisroots=None,
1562 ellipsisroots=None,
1560 fullnodes=None,
1563 fullnodes=None,
1561 ):
1564 ):
1562 assert version in supportedoutgoingversions(repo)
1565 assert version in supportedoutgoingversions(repo)
1563
1566
1564 if matcher is None:
1567 if matcher is None:
1565 matcher = matchmod.always()
1568 matcher = matchmod.always()
1566 if oldmatcher is None:
1569 if oldmatcher is None:
1567 oldmatcher = matchmod.never()
1570 oldmatcher = matchmod.never()
1568
1571
1569 if version == b'01' and not matcher.always():
1572 if version == b'01' and not matcher.always():
1570 raise error.ProgrammingError(
1573 raise error.ProgrammingError(
1571 b'version 01 changegroups do not support sparse file matchers'
1574 b'version 01 changegroups do not support sparse file matchers'
1572 )
1575 )
1573
1576
1574 if ellipses and version in (b'01', b'02'):
1577 if ellipses and version in (b'01', b'02'):
1575 raise error.Abort(
1578 raise error.Abort(
1576 _(
1579 _(
1577 b'ellipsis nodes require at least cg3 on client and server, '
1580 b'ellipsis nodes require at least cg3 on client and server, '
1578 b'but negotiated version %s'
1581 b'but negotiated version %s'
1579 )
1582 )
1580 % version
1583 % version
1581 )
1584 )
1582
1585
1583 # Requested files could include files not in the local store. So
1586 # Requested files could include files not in the local store. So
1584 # filter those out.
1587 # filter those out.
1585 matcher = repo.narrowmatch(matcher)
1588 matcher = repo.narrowmatch(matcher)
1586
1589
1587 fn = _packermap[version][0]
1590 fn = _packermap[version][0]
1588 return fn(
1591 return fn(
1589 repo,
1592 repo,
1590 oldmatcher,
1593 oldmatcher,
1591 matcher,
1594 matcher,
1592 bundlecaps,
1595 bundlecaps,
1593 ellipses=ellipses,
1596 ellipses=ellipses,
1594 shallow=shallow,
1597 shallow=shallow,
1595 ellipsisroots=ellipsisroots,
1598 ellipsisroots=ellipsisroots,
1596 fullnodes=fullnodes,
1599 fullnodes=fullnodes,
1597 )
1600 )
1598
1601
1599
1602
1600 def getunbundler(version, fh, alg, extras=None):
1603 def getunbundler(version, fh, alg, extras=None):
1601 return _packermap[version][1](fh, alg, extras=extras)
1604 return _packermap[version][1](fh, alg, extras=extras)
1602
1605
1603
1606
1604 def _changegroupinfo(repo, nodes, source):
1607 def _changegroupinfo(repo, nodes, source):
1605 if repo.ui.verbose or source == b'bundle':
1608 if repo.ui.verbose or source == b'bundle':
1606 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1609 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1607 if repo.ui.debugflag:
1610 if repo.ui.debugflag:
1608 repo.ui.debug(b"list of changesets:\n")
1611 repo.ui.debug(b"list of changesets:\n")
1609 for node in nodes:
1612 for node in nodes:
1610 repo.ui.debug(b"%s\n" % hex(node))
1613 repo.ui.debug(b"%s\n" % hex(node))
1611
1614
1612
1615
1613 def makechangegroup(
1616 def makechangegroup(
1614 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1617 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1615 ):
1618 ):
1616 cgstream = makestream(
1619 cgstream = makestream(
1617 repo,
1620 repo,
1618 outgoing,
1621 outgoing,
1619 version,
1622 version,
1620 source,
1623 source,
1621 fastpath=fastpath,
1624 fastpath=fastpath,
1622 bundlecaps=bundlecaps,
1625 bundlecaps=bundlecaps,
1623 )
1626 )
1624 return getunbundler(
1627 return getunbundler(
1625 version,
1628 version,
1626 util.chunkbuffer(cgstream),
1629 util.chunkbuffer(cgstream),
1627 None,
1630 None,
1628 {b'clcount': len(outgoing.missing)},
1631 {b'clcount': len(outgoing.missing)},
1629 )
1632 )
1630
1633
1631
1634
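An end-to-end sketch (assuming `repo` and an `outgoing` object as used above): makechangegroup() wraps the generated stream back into an unpacker so local callers, such as strip, can apply it.

    cg = makechangegroup(repo, outgoing, b'02', b'strip')
    # cg is a cg2unpacker; other modules feed it back into the repo via
    # its apply()-style entry points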
1632 def makestream(
1635 def makestream(
1633 repo,
1636 repo,
1634 outgoing,
1637 outgoing,
1635 version,
1638 version,
1636 source,
1639 source,
1637 fastpath=False,
1640 fastpath=False,
1638 bundlecaps=None,
1641 bundlecaps=None,
1639 matcher=None,
1642 matcher=None,
1640 ):
1643 ):
1641 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1644 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1642
1645
1643 repo = repo.unfiltered()
1646 repo = repo.unfiltered()
1644 commonrevs = outgoing.common
1647 commonrevs = outgoing.common
1645 csets = outgoing.missing
1648 csets = outgoing.missing
1646 heads = outgoing.ancestorsof
1649 heads = outgoing.ancestorsof
1647 # We go through the fast path if we get told to, or if all (unfiltered)
1650 # We go through the fast path if we get told to, or if all (unfiltered)
1648 # heads have been requested (since we then know that all linkrevs will
1651 # heads have been requested (since we then know that all linkrevs will
1649 # be pulled by the client).
1652 # be pulled by the client).
1650 heads.sort()
1653 heads.sort()
1651 fastpathlinkrev = fastpath or (
1654 fastpathlinkrev = fastpath or (
1652 repo.filtername is None and heads == sorted(repo.heads())
1655 repo.filtername is None and heads == sorted(repo.heads())
1653 )
1656 )
1654
1657
1655 repo.hook(b'preoutgoing', throw=True, source=source)
1658 repo.hook(b'preoutgoing', throw=True, source=source)
1656 _changegroupinfo(repo, csets, source)
1659 _changegroupinfo(repo, csets, source)
1657 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1660 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1658
1661
1659
1662
1660 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1663 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1661 revisions = 0
1664 revisions = 0
1662 files = 0
1665 files = 0
1663 progress = repo.ui.makeprogress(
1666 progress = repo.ui.makeprogress(
1664 _(b'files'), unit=_(b'files'), total=expectedfiles
1667 _(b'files'), unit=_(b'files'), total=expectedfiles
1665 )
1668 )
1666 for chunkdata in iter(source.filelogheader, {}):
1669 for chunkdata in iter(source.filelogheader, {}):
1667 files += 1
1670 files += 1
1668 f = chunkdata[b"filename"]
1671 f = chunkdata[b"filename"]
1669 repo.ui.debug(b"adding %s revisions\n" % f)
1672 repo.ui.debug(b"adding %s revisions\n" % f)
1670 progress.increment()
1673 progress.increment()
1671 fl = repo.file(f)
1674 fl = repo.file(f)
1672 o = len(fl)
1675 o = len(fl)
1673 try:
1676 try:
1674 deltas = source.deltaiter()
1677 deltas = source.deltaiter()
1675 if not fl.addgroup(deltas, revmap, trp):
1678 if not fl.addgroup(deltas, revmap, trp):
1676 raise error.Abort(_(b"received file revlog group is empty"))
1679 raise error.Abort(_(b"received file revlog group is empty"))
1677 except error.CensoredBaseError as e:
1680 except error.CensoredBaseError as e:
1678 raise error.Abort(_(b"received delta base is censored: %s") % e)
1681 raise error.Abort(_(b"received delta base is censored: %s") % e)
1679 revisions += len(fl) - o
1682 revisions += len(fl) - o
1680 if f in needfiles:
1683 if f in needfiles:
1681 needs = needfiles[f]
1684 needs = needfiles[f]
1682 for new in pycompat.xrange(o, len(fl)):
1685 for new in pycompat.xrange(o, len(fl)):
1683 n = fl.node(new)
1686 n = fl.node(new)
1684 if n in needs:
1687 if n in needs:
1685 needs.remove(n)
1688 needs.remove(n)
1686 else:
1689 else:
1687 raise error.Abort(_(b"received spurious file revlog entry"))
1690 raise error.Abort(_(b"received spurious file revlog entry"))
1688 if not needs:
1691 if not needs:
1689 del needfiles[f]
1692 del needfiles[f]
1690 progress.complete()
1693 progress.complete()
1691
1694
1692 for f, needs in pycompat.iteritems(needfiles):
1695 for f, needs in pycompat.iteritems(needfiles):
1693 fl = repo.file(f)
1696 fl = repo.file(f)
1694 for n in needs:
1697 for n in needs:
1695 try:
1698 try:
1696 fl.rev(n)
1699 fl.rev(n)
1697 except error.LookupError:
1700 except error.LookupError:
1698 raise error.Abort(
1701 raise error.Abort(
1699 _(b'missing file data for %s:%s - run hg verify')
1702 _(b'missing file data for %s:%s - run hg verify')
1700 % (f, hex(n))
1703 % (f, hex(n))
1701 )
1704 )
1702
1705
1703 return revisions, files
1706 return revisions, files
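The iter(callable, sentinel) idiom used by the file loop above stops as soon as filelogheader() returns the empty dict; a standalone illustration:

    import io

    buf = io.BytesIO(b'ab')
    for ch in iter(lambda: buf.read(1), b''):
        print(ch)  # b'a', then b'b'; the b'' sentinel ends the loop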
@@ -1,494 +1,497 b''
1 # commit.py - function to perform commit
1 # commit.py - function to perform commit
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import (
11 from .node import (
12 hex,
12 hex,
13 nullid,
13 nullid,
14 nullrev,
14 nullrev,
15 )
15 )
16
16
17 from . import (
17 from . import (
18 context,
18 context,
19 mergestate,
19 mergestate,
20 metadata,
20 metadata,
21 phases,
21 phases,
22 scmutil,
22 scmutil,
23 subrepoutil,
23 subrepoutil,
24 )
24 )
25
25
26
26
27 def _write_copy_meta(repo):
27 def _write_copy_meta(repo):
28 """return a (changelog, filelog) boolean tuple
28 """return a (changelog, filelog) boolean tuple
29
29
30 changelog: copy related information should be stored in the changeset
30 changelog: copy related information should be stored in the changeset
31 filelog: copy related information should be written in the file revision
31 filelog: copy related information should be written in the file revision
32 """
32 """
33 if repo.filecopiesmode == b'changeset-sidedata':
33 if repo.filecopiesmode == b'changeset-sidedata':
34 writechangesetcopy = True
34 writechangesetcopy = True
35 writefilecopymeta = True
35 writefilecopymeta = True
36 else:
36 else:
37 writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
37 writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
38 writefilecopymeta = writecopiesto != b'changeset-only'
38 writefilecopymeta = writecopiesto != b'changeset-only'
39 writechangesetcopy = writecopiesto in (
39 writechangesetcopy = writecopiesto in (
40 b'changeset-only',
40 b'changeset-only',
41 b'compatibility',
41 b'compatibility',
42 )
42 )
43 return writechangesetcopy, writefilecopymeta
43 return writechangesetcopy, writefilecopymeta
44
44
45
45
46 def commitctx(repo, ctx, error=False, origctx=None):
46 def commitctx(repo, ctx, error=False, origctx=None):
47 """Add a new revision to the target repository.
47 """Add a new revision to the target repository.
48 Revision information is passed via the context argument.
48 Revision information is passed via the context argument.
49
49
50 ctx.files() should list all files involved in this commit, i.e.
50 ctx.files() should list all files involved in this commit, i.e.
51 modified/added/removed files. On merge, it may be wider than the
51 modified/added/removed files. On merge, it may be wider than the
52 ctx.files() to be committed, since any file nodes derived directly
52 ctx.files() to be committed, since any file nodes derived directly
53 from p1 or p2 are excluded from the committed ctx.files().
53 from p1 or p2 are excluded from the committed ctx.files().
54
54
55 origctx is for convert to work around the problem that bug
55 origctx is for convert to work around the problem that bug
56 fixes to the files list in changesets change hashes. For
56 fixes to the files list in changesets change hashes. For
57 convert to be the identity, it can pass an origctx and this
57 convert to be the identity, it can pass an origctx and this
58 function will use the same files list when it makes sense to
58 function will use the same files list when it makes sense to
59 do so.
59 do so.
60 """
60 """
61 repo = repo.unfiltered()
61 repo = repo.unfiltered()
62
62
63 p1, p2 = ctx.p1(), ctx.p2()
63 p1, p2 = ctx.p1(), ctx.p2()
64 user = ctx.user()
64 user = ctx.user()
65
65
66 with repo.lock(), repo.transaction(b"commit") as tr:
66 with repo.lock(), repo.transaction(b"commit") as tr:
67 mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)
67 mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)
68
68
69 extra = ctx.extra().copy()
69 extra = ctx.extra().copy()
70
70
71 if extra is not None:
71 if extra is not None:
72 for name in (
72 for name in (
73 b'p1copies',
73 b'p1copies',
74 b'p2copies',
74 b'p2copies',
75 b'filesadded',
75 b'filesadded',
76 b'filesremoved',
76 b'filesremoved',
77 ):
77 ):
78 extra.pop(name, None)
78 extra.pop(name, None)
79 if repo.changelog._copiesstorage == b'extra':
79 if repo.changelog._copiesstorage == b'extra':
80 extra = _extra_with_copies(repo, extra, files)
80 extra = _extra_with_copies(repo, extra, files)
81
81
82 # save the tip to check whether we actually committed anything
82 # save the tip to check whether we actually committed anything
83 oldtip = repo.changelog.tiprev()
83 oldtip = repo.changelog.tiprev()
84
84
85 # update changelog
85 # update changelog
86 repo.ui.note(_(b"committing changelog\n"))
86 repo.ui.note(_(b"committing changelog\n"))
87 repo.changelog.delayupdate(tr)
87 repo.changelog.delayupdate(tr)
88 n = repo.changelog.add(
88 n = repo.changelog.add(
89 mn,
89 mn,
90 files,
90 files,
91 ctx.description(),
91 ctx.description(),
92 tr,
92 tr,
93 p1.node(),
93 p1.node(),
94 p2.node(),
94 p2.node(),
95 user,
95 user,
96 ctx.date(),
96 ctx.date(),
97 extra,
97 extra,
98 )
98 )
99 rev = repo[n].rev()
99 rev = repo[n].rev()
100 if oldtip != repo.changelog.tiprev():
101 repo.register_changeset(rev, repo.changelog.changelogrevision(rev))
102
100 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
103 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
101 repo.hook(
104 repo.hook(
102 b'pretxncommit',
105 b'pretxncommit',
103 throw=True,
106 throw=True,
104 node=hex(n),
107 node=hex(n),
105 parent1=xp1,
108 parent1=xp1,
106 parent2=xp2,
109 parent2=xp2,
107 )
110 )
108 # set the new commit in its proper phase
111 # set the new commit in its proper phase
109 targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
112 targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
110
113
111 # prevent unmarking changesets as public on recommit
114 # prevent unmarking changesets as public on recommit
112 waspublic = oldtip == repo.changelog.tiprev() and not repo[rev].phase()
115 waspublic = oldtip == repo.changelog.tiprev() and not repo[rev].phase()
113
116
114 if targetphase and not waspublic:
117 if targetphase and not waspublic:
115 # retracting the boundary does not alter the parent changeset.
118 # retracting the boundary does not alter the parent changeset.
116 # if a parent has a higher phase, the resulting phase will
119 # if a parent has a higher phase, the resulting phase will
117 # be compliant anyway
120 # be compliant anyway
118 #
121 #
119 # if the minimal phase was 0, we don't need to retract anything
122 # if the minimal phase was 0, we don't need to retract anything
120 phases.registernew(repo, tr, targetphase, [rev])
123 phases.registernew(repo, tr, targetphase, [rev])
121 return n
124 return n
122
125
123
126
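The repo.register_changeset() call above is the new callback introduced by this change; a hedged sketch of a consumer (the wrapping shown is illustrative, not part of the patch):

    def my_register_changeset(rev, changelogrevision):
        # e.g. keep an incremental cache keyed by revision up to date
        update_cache(rev, changelogrevision.files)  # hypothetical helper

    repo.register_changeset = my_register_changeset  # illustration only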
124 def _prepare_files(tr, ctx, error=False, origctx=None):
127 def _prepare_files(tr, ctx, error=False, origctx=None):
125 repo = ctx.repo()
128 repo = ctx.repo()
126 p1 = ctx.p1()
129 p1 = ctx.p1()
127
130
128 writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
131 writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
129 files = metadata.ChangingFiles()
132 files = metadata.ChangingFiles()
130 ms = mergestate.mergestate.read(repo)
133 ms = mergestate.mergestate.read(repo)
131 salvaged = _get_salvaged(repo, ms, ctx)
134 salvaged = _get_salvaged(repo, ms, ctx)
132 for s in salvaged:
135 for s in salvaged:
133 files.mark_salvaged(s)
136 files.mark_salvaged(s)
134
137
135 if ctx.manifestnode():
138 if ctx.manifestnode():
136 # reuse an existing manifest revision
139 # reuse an existing manifest revision
137 repo.ui.debug(b'reusing known manifest\n')
140 repo.ui.debug(b'reusing known manifest\n')
138 mn = ctx.manifestnode()
141 mn = ctx.manifestnode()
139 files.update_touched(ctx.files())
142 files.update_touched(ctx.files())
140 if writechangesetcopy:
143 if writechangesetcopy:
141 files.update_added(ctx.filesadded())
144 files.update_added(ctx.filesadded())
142 files.update_removed(ctx.filesremoved())
145 files.update_removed(ctx.filesremoved())
143 elif not ctx.files():
146 elif not ctx.files():
144 repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
147 repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
145 mn = p1.manifestnode()
148 mn = p1.manifestnode()
146 else:
149 else:
147 mn = _process_files(tr, ctx, ms, files, error=error)
150 mn = _process_files(tr, ctx, ms, files, error=error)
148
151
149 if origctx and origctx.manifestnode() == mn:
152 if origctx and origctx.manifestnode() == mn:
150 origfiles = origctx.files()
153 origfiles = origctx.files()
151 assert files.touched.issubset(origfiles)
154 assert files.touched.issubset(origfiles)
152 files.update_touched(origfiles)
155 files.update_touched(origfiles)
153
156
154 if writechangesetcopy:
157 if writechangesetcopy:
155 files.update_copies_from_p1(ctx.p1copies())
158 files.update_copies_from_p1(ctx.p1copies())
156 files.update_copies_from_p2(ctx.p2copies())
159 files.update_copies_from_p2(ctx.p2copies())
157
160
158 return mn, files
161 return mn, files
159
162
160
163
161 def _get_salvaged(repo, ms, ctx):
164 def _get_salvaged(repo, ms, ctx):
162 """returns a list of salvaged files
165 """returns a list of salvaged files
163
166
164 returns an empty list if the config option which processes salvaged
167 returns an empty list if the config option which processes salvaged
165 files is not enabled"""
168 files is not enabled"""
166 salvaged = []
169 salvaged = []
167 copy_sd = repo.filecopiesmode == b'changeset-sidedata'
170 copy_sd = repo.filecopiesmode == b'changeset-sidedata'
168 if copy_sd and len(ctx.parents()) > 1:
171 if copy_sd and len(ctx.parents()) > 1:
169 if ms.active():
172 if ms.active():
170 for fname in sorted(ms.allextras().keys()):
173 for fname in sorted(ms.allextras().keys()):
171 might_removed = ms.extras(fname).get(b'merge-removal-candidate')
174 might_removed = ms.extras(fname).get(b'merge-removal-candidate')
172 if might_removed == b'yes':
175 if might_removed == b'yes':
173 if fname in ctx:
176 if fname in ctx:
174 salvaged.append(fname)
177 salvaged.append(fname)
175 return salvaged
178 return salvaged
176
179
177
180
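A self-contained illustration of the salvage rule above (all data invented): a file flagged as a merge-removal candidate that still exists in the commit context counts as salvaged.

    ms_extras = {b'foo.txt': {b'merge-removal-candidate': b'yes'}}
    ctx_files = {b'foo.txt'}  # the file survived the merge commit
    salvaged = [
        f
        for f in sorted(ms_extras)
        if ms_extras[f].get(b'merge-removal-candidate') == b'yes'
        and f in ctx_files
    ]
    assert salvaged == [b'foo.txt']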
def _process_files(tr, ctx, ms, files, error=False):
    repo = ctx.repo()
    p1 = ctx.p1()
    p2 = ctx.p2()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    m1ctx = p1.manifestctx()
    m2ctx = p2.manifestctx()
    mctx = m1ctx.copy()

    m = mctx.read()
    m1 = m1ctx.read()
    m2 = m2ctx.read()

    # check in files
    added = []
    removed = list(ctx.removed())
    linkrev = len(repo)
    repo.ui.note(_(b"committing files:\n"))
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(ctx.modified() + ctx.added()):
        repo.ui.note(uipathfn(f) + b"\n")
        try:
            fctx = ctx[f]
            if fctx is None:
                removed.append(f)
            else:
                added.append(f)
                m[f], is_touched = _filecommit(
                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms
                )
                if is_touched:
                    if is_touched == 'added':
                        files.mark_added(f)
                    elif is_touched == 'merged':
                        files.mark_merged(f)
                    else:
                        files.mark_touched(f)
                m.setflag(f, fctx.flags())
        except OSError:
            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise
        except IOError as inst:
            errcode = getattr(inst, 'errno', errno.ENOENT)
            if error or errcode and errcode != errno.ENOENT:
                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
                raise

    # update manifest
    removed = [f for f in removed if f in m1 or f in m2]
    drop = sorted([f for f in removed if f in m])
    for f in drop:
        del m[f]
    if p2.rev() == nullrev:
        files.update_removed(removed)
    else:
        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
        for f in removed:
            if not rf(f):
                files.mark_removed(f)

    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)

    return mn

def _filecommit(
    repo,
    fctx,
    manifest1,
    manifest2,
    linkrev,
    tr,
    includecopymeta,
    ms,
):
    """
    commit an individual file as part of a larger transaction

    input:

    fctx: a file context with the content we are trying to commit
    manifest1: manifest of changeset first parent
    manifest2: manifest of changeset second parent
    linkrev: revision number of the changeset being created
    tr: current transaction
    includecopymeta: boolean, set to False to skip storing the copy data
                     (only used by the Google specific feature of using
                     changeset extra as copy source of truth).
    ms: mergestate object

    output: (filenode, touched)

    filenode: the filenode that should be used by this changeset
    touched: one of: None (meaning untouched), 'added', 'merged' or 'modified'
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    touched = None
    if fparent1 == fparent2 == nullid:
        touched = 'added'

    if isinstance(fctx, context.filectx):
        # This block fast-paths the most common comparisons. It assumes that
        # a bare filectx is used and no merge happened, hence there is no
        # need to create a new file revision in this case.
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
            if (
                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
            ) or (
                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
            ):
                touched = 'modified'
            return node, touched

    flog = repo.file(fname)
    meta = {}
    cfname = fctx.copysource()
    fnode = None

    if cfname and cfname != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cnode = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2:  # branch merge
            if fparent2 == nullid or cnode is None:  # copied on remote side
                if cfname in manifest2:
                    cnode = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if cnode:
            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
            if includecopymeta:
                meta[b"copy"] = cfname
                meta[b"copyrev"] = hex(cnode)
            fparent1, fparent2 = nullid, newfparent
        else:
            repo.ui.warn(
                _(
                    b"warning: can't find ancestor for '%s' "
                    b"copied from '%s'!\n"
                )
                % (fname, cfname)
            )

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid
        elif not fparentancestors:
            # TODO: this whole if-else might be simplified much more
            if (
                ms.active()
                and ms.extras(fname).get(b'filenode-source') == b'other'
            ):
                fparent1, fparent2 = fparent2, nullid

    force_new_node = False
    # The file might have been deleted by the merge code while the user
    # explicitly chose to revert the file and keep it. The other case is a
    # change-delete or delete-change conflict where the user explicitly
    # chose to keep the file. The goal is to create a new filenode for such
    # explicit choices.
    if (
        repo.ui.configbool(b'experimental', b'merge-track-salvaged')
        and ms.active()
        and ms.extras(fname).get(b'merge-removal-candidate') == b'yes'
    ):
        force_new_node = True
    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
        if touched is None:  # do not overwrite added
            if fparent2 == nullid:
                touched = 'modified'
            else:
                touched = 'merged'
        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        touched = 'modified'
        fnode = fparent1
    else:
        fnode = fparent1
    return fnode, touched

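The meta dict built in _filecommit() ends up embedded in the filelog
revision itself. As a hedged illustration of that on-disk convention (the
helper below is a simplified sketch of filelog metadata framing, not a call
into Mercurial): copy metadata is framed by b'\x01\n' markers ahead of the
file text.

def pack_filelog_meta(meta, text):
    # Simplified sketch of filelog metadata framing: a sorted
    # "key: value" header wrapped in \x01\n markers, then the text.
    if not meta:
        return text
    header = b''.join(b'%s: %s\n' % (k, meta[k]) for k in sorted(meta))
    return b'\x01\n' + header + b'\x01\n' + text

packed = pack_filelog_meta(
    {b'copy': b'foo', b'copyrev': b'0' * 40}, b'file content'
)
assert packed.startswith(b'\x01\ncopy: foo\ncopyrev: ')
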
def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
    """make a new manifest entry (or reuse an existing one)

    given an initialised manifest context and precomputed lists of
    - files: files affected by the commit
    - added: new entries in the manifest
    - drop: entries present in parents but absent from this one

    Create a new manifest revision, reuse existing ones if possible.

    Return the nodeid of the manifest revision.
    """
    repo = ctx.repo()

    md = None

    # all of this is cached, so it is fine to get it all from the ctx.
    p1 = ctx.p1()
    p2 = ctx.p2()
    m1ctx = p1.manifestctx()

    m1 = m1ctx.read()

    if not files:
        # if no "files" actually changed in terms of the changelog,
        # try hard to detect unmodified manifest entry so that the
        # exact same commit can be reproduced later on convert.
        md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
    if not files and md:
        repo.ui.debug(
            b'not reusing manifest (no file change in '
            b'changelog, but manifest differs)\n'
        )
    if files or md:
        repo.ui.note(_(b"committing manifest\n"))
        # we're using narrowmatch here since it's already applied at
        # other stages (such as dirstate.walk), so we're already
        # ignoring things outside of narrowspec in most cases. The
        # one case where we might have files outside the narrowspec
        # at this point is merges, and we already error out in the
        # case where the merge has files outside of the narrowspec,
        # so this is safe.
        mn = mctx.write(
            tr,
            linkrev,
            p1.manifestnode(),
            p2.manifestnode(),
            added,
            drop,
            match=repo.narrowmatch(),
        )
    else:
        repo.ui.debug(
            b'reusing manifest from p1 (listed files actually unchanged)\n'
        )
        mn = p1.manifestnode()

    return mn

def _extra_with_copies(repo, extra, files):
    """encode copy information into an `extra` dictionary"""
    p1copies = files.copied_from_p1
    p2copies = files.copied_from_p2
    filesadded = files.added
    filesremoved = files.removed
    files = sorted(files.touched)
    if not _write_copy_meta(repo)[1]:
        # If writing only to changeset extras, use None to indicate that
        # no entry should be written. If writing to both, write an empty
        # entry to prevent the reader from falling back to reading
        # filelogs.
        p1copies = p1copies or None
        p2copies = p2copies or None
        filesadded = filesadded or None
        filesremoved = filesremoved or None

    extrasentries = p1copies, p2copies, filesadded, filesremoved
    if extra is None and any(x is not None for x in extrasentries):
        extra = {}
    if p1copies is not None:
        p1copies = metadata.encodecopies(files, p1copies)
        extra[b'p1copies'] = p1copies
    if p2copies is not None:
        p2copies = metadata.encodecopies(files, p2copies)
        extra[b'p2copies'] = p2copies
    if filesadded is not None:
        filesadded = metadata.encodefileindices(files, filesadded)
        extra[b'filesadded'] = filesadded
    if filesremoved is not None:
        filesremoved = metadata.encodefileindices(files, filesremoved)
        extra[b'filesremoved'] = filesremoved
    return extra
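A small illustration of the None-versus-empty convention described in the
comment above (plain values, no repo involved): when copy data is written
only to changeset extras, empty collections collapse to None so that no
extra entry is emitted at all.

writing_to_filelogs_too = False

p1copies = {}  # no copies recorded against p1
if not writing_to_filelogs_too:
    p1copies = p1copies or None

# None -> skip the b'p1copies' key entirely; an empty entry would instead
# tell readers "there are definitely no copies, do not consult filelogs".
assert p1copies is None
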
@@ -1,796 +1,798 b''
# exchangev2.py - repository exchange for wire protocol version 2
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import weakref

from .i18n import _
from .node import (
    nullid,
    short,
)
from . import (
    bookmarks,
    error,
    mdiff,
    narrowspec,
    phases,
    pycompat,
    setdiscovery,
)
from .interfaces import repository

def pull(pullop):
    """Pull using wire protocol version 2."""
    repo = pullop.repo
    remote = pullop.remote

    usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)

    # If this is a clone and it was requested to perform a "stream clone",
    # we obtain the raw file data from the remote then fall back to an
    # incremental pull. This is somewhat hacky and is not nearly robust enough
    # for long-term usage.
    if usingrawchangelogandmanifest:
        with repo.transaction(b'clone'):
            _fetchrawstorefiles(repo, remote)
            repo.invalidate(clearfilecache=True)

    tr = pullop.trmanager.transaction()

    # We don't use the repo's narrow matcher here because the patterns passed
    # to exchange.pull() could be different.
    narrowmatcher = narrowspec.match(
        repo.root,
        # Empty maps to nevermatcher. So always
        # set includes if missing.
        pullop.includepats or {b'path:.'},
        pullop.excludepats,
    )

    if pullop.includepats or pullop.excludepats:
        pathfilter = {}
        if pullop.includepats:
            pathfilter[b'include'] = sorted(pullop.includepats)
        if pullop.excludepats:
            pathfilter[b'exclude'] = sorted(pullop.excludepats)
    else:
        pathfilter = None

    # Figure out what needs to be fetched.
    common, fetch, remoteheads = _pullchangesetdiscovery(
        repo, remote, pullop.heads, abortwhenunrelated=pullop.force
    )

    # And fetch the data.
    pullheads = pullop.heads or remoteheads
    csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)

    # New revisions are written to the changelog. But all other updates
    # are deferred. Do those now.

    # Ensure all new changesets are draft by default. If the repo is
    # publishing, the phase will be adjusted by the loop below.
    if csetres[b'added']:
        phases.registernew(
            repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']]
        )

    # And adjust the phase of all changesets accordingly.
    for phasenumber, phase in phases.phasenames.items():
        if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
            continue

        phases.advanceboundary(
            repo,
            tr,
            phasenumber,
            csetres[b'nodesbyphase'][phase],
        )

    # Write bookmark updates.
    bookmarks.updatefromremote(
        repo.ui,
        repo,
        csetres[b'bookmarks'],
        remote.url(),
        pullop.gettransaction,
        explicit=pullop.explicitbookmarks,
    )

    manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])

    # We don't properly support shallow changeset and manifest yet. So we apply
    # depth limiting locally.
    if pullop.depth:
        relevantcsetnodes = set()
        clnode = repo.changelog.node

        for rev in repo.revs(
            b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
        ):
            relevantcsetnodes.add(clnode(rev))

        csetrelevantfilter = lambda n: n in relevantcsetnodes

    else:
        csetrelevantfilter = lambda n: True

    # If obtaining the raw store files, we need to scan the full repo to
    # derive all the changesets, manifests, and linkrevs.
    if usingrawchangelogandmanifest:
        csetsforfiles = []
        mnodesforfiles = []
        manifestlinkrevs = {}

        for rev in repo:
            ctx = repo[rev]
            node = ctx.node()

            if not csetrelevantfilter(node):
                continue

            mnode = ctx.manifestnode()

            csetsforfiles.append(node)
            mnodesforfiles.append(mnode)
            manifestlinkrevs[mnode] = rev

    else:
        csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
        mnodesforfiles = manres[b'added']
        manifestlinkrevs = manres[b'linkrevs']

    # Find all file nodes referenced by added manifests and fetch those
    # revisions.
    fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
    _fetchfilesfromcsets(
        repo,
        tr,
        remote,
        pathfilter,
        fnodes,
        csetsforfiles,
        manifestlinkrevs,
        shallow=bool(pullop.depth),
    )

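To make the wire argument concrete, here is a toy rendering (hypothetical
patterns) of the pathfilter structure assembled in pull() above:

includepats = {b'path:src', b'path:docs'}
excludepats = set()

if includepats or excludepats:
    pathfilter = {}
    if includepats:
        pathfilter[b'include'] = sorted(includepats)
    if excludepats:
        pathfilter[b'exclude'] = sorted(excludepats)
else:
    pathfilter = None

assert pathfilter == {b'include': [b'path:docs', b'path:src']}
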
def _checkuserawstorefiledata(pullop):
    """Check whether we should use rawstorefiledata command to retrieve data."""

    repo = pullop.repo
    remote = pullop.remote

    # Command to obtain raw store data isn't available.
    if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
        return False

    # Only honor if user requested stream clone operation.
    if not pullop.streamclonerequested:
        return False

    # Only works on empty repos.
    if len(repo):
        return False

    # TODO This is super hacky. There needs to be a storage API for this. We
    # also need to check for compatibility with the remote.
    if b'revlogv1' not in repo.requirements:
        return False

    return True

def _fetchrawstorefiles(repo, remote):
    with remote.commandexecutor() as e:
        objs = e.callcommand(
            b'rawstorefiledata',
            {
                b'files': [b'changelog', b'manifestlog'],
            },
        ).result()

        # First object is a summary of files data that follows.
        overall = next(objs)

        progress = repo.ui.makeprogress(
            _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
        )
        with progress:
            progress.update(0)

            # Next are pairs of file metadata, data.
            while True:
                try:
                    filemeta = next(objs)
                except StopIteration:
                    break

                for k in (b'location', b'path', b'size'):
                    if k not in filemeta:
                        raise error.Abort(
                            _(b'remote file data missing key: %s') % k
                        )

                if filemeta[b'location'] == b'store':
                    vfs = repo.svfs
                else:
                    raise error.Abort(
                        _(b'invalid location for raw file data: %s')
                        % filemeta[b'location']
                    )

                bytesremaining = filemeta[b'size']

                with vfs.open(filemeta[b'path'], b'wb') as fh:
                    while True:
                        try:
                            chunk = next(objs)
                        except StopIteration:
                            break

                        bytesremaining -= len(chunk)

                        if bytesremaining < 0:
                            raise error.Abort(
                                _(
                                    b'received invalid number of bytes for file '
                                    b'data; expected %d, got extra'
                                )
                                % filemeta[b'size']
                            )

                        progress.increment(step=len(chunk))
                        fh.write(chunk)

                        try:
                            if chunk.islast:
                                break
                        except AttributeError:
                            raise error.Abort(
                                _(
                                    b'did not receive indefinite length bytestring '
                                    b'for file data'
                                )
                            )

                if bytesremaining:
                    raise error.Abort(
                        _(
                            b'received invalid number of bytes for '
                            b'file data; expected %d, got %d'
                        )
                        % (
                            filemeta[b'size'],
                            filemeta[b'size'] - bytesremaining,
                        )
                    )

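The size bookkeeping above reduces to a simple invariant; a toy restatement
with plain bytes and no wire protocol:

advertised_size = 10
chunks = [b'12345', b'678', b'90']

bytesremaining = advertised_size
for chunk in chunks:
    bytesremaining -= len(chunk)
    assert bytesremaining >= 0, 'got extra bytes'
assert bytesremaining == 0, 'stream ended short'
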
def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
    """Determine which changesets need to be pulled."""

    if heads:
        knownnode = repo.changelog.hasnode
        if all(knownnode(head) for head in heads):
            return heads, False, heads

    # TODO wire protocol version 2 is capable of more efficient discovery
    # than setdiscovery. Consider implementing something better.
    common, fetch, remoteheads = setdiscovery.findcommonheads(
        repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
    )

    common = set(common)
    remoteheads = set(remoteheads)

    # If a remote head is filtered locally, put it back in the common set.
    # See the comment in exchange._pulldiscoverychangegroup() for more.

    if fetch and remoteheads:
        has_node = repo.unfiltered().changelog.index.has_node

        common |= {head for head in remoteheads if has_node(head)}

        if set(remoteheads).issubset(common):
            fetch = []

    common.discard(nullid)

    return common, fetch, remoteheads

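A toy restatement (hypothetical two-node repo) of the filtered-head
adjustment above: a remote head that exists locally but is hidden still
counts as common, which can empty the fetch set.

remoteheads = {b'A', b'B'}
common = {b'A'}
fetch = [b'B']

# b'B' exists in the unfiltered local repo (e.g. it is obsolete/hidden).
has_node = lambda node: node in {b'A', b'B'}

if fetch and remoteheads:
    common |= {head for head in remoteheads if has_node(head)}
    if set(remoteheads).issubset(common):
        fetch = []

assert fetch == []
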
def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
    # TODO consider adding a step here where we obtain the DAG shape first
    # (or ask the server to slice changesets into chunks for us) so that
    # we can perform multiple fetches in batches. This will facilitate
    # resuming interrupted clones, higher server-side cache hit rates due
    # to smaller segments, etc.
    with remote.commandexecutor() as e:
        objs = e.callcommand(
            b'changesetdata',
            {
                b'revisions': [
                    {
                        b'type': b'changesetdagrange',
                        b'roots': sorted(common),
                        b'heads': sorted(remoteheads),
                    }
                ],
                b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
            },
        ).result()

        # The context manager waits on all response data when exiting. So
        # we need to remain in the context manager in order to stream data.
        return _processchangesetdata(repo, tr, objs)

def _processchangesetdata(repo, tr, objs):
    repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))

    urepo = repo.unfiltered()
    cl = urepo.changelog

    cl.delayupdate(tr)

    # The first emitted object is a header describing the data that
    # follows.
    meta = next(objs)

    progress = repo.ui.makeprogress(
        _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
    )

    manifestnodes = {}
    added = []

    def linkrev(node):
        repo.ui.debug(b'add changeset %s\n' % short(node))
        # Linkrev for changelog is always self.
        return len(cl)

    def ondupchangeset(cl, node):
        added.append(node)

    def onchangeset(cl, node):
        progress.increment()

        rev = cl.rev(node)
        revision = cl.changelogrevision(rev)
        added.append(node)

        # We need to preserve the mapping of changelog revision to node
        # so we can set the linkrev accordingly when manifests are added.
        manifestnodes[rev] = revision.manifest

        repo.register_changeset(rev, revision)

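    # Note: the register_changeset() call above is the new callback that
    # this changeset introduces. A minimal sketch of the consumer side is
    # given below; the observer list and its name are illustrative
    # assumptions, only register_changeset() itself comes from this change:
    #
    #     def register_changeset(self, rev, changelogrevision):
    #         for callback in self._changeset_observers:
    #             callback(rev, changelogrevision)
    #
    # This lets subsystems (caches, for instance) see each incoming
    # changeset as it is added instead of rescanning the changelog later.
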
    nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
    remotebookmarks = {}

    # addgroup() expects a 7-tuple describing revisions. This normalizes
    # the wire data to that format.
    #
    # This loop also aggregates non-revision metadata, such as phase
    # data.
    def iterrevisions():
        for cset in objs:
            node = cset[b'node']

            if b'phase' in cset:
                nodesbyphase[cset[b'phase']].add(node)

            for mark in cset.get(b'bookmarks', []):
                remotebookmarks[mark] = node

            # TODO add mechanism for extensions to examine records so they
            # can siphon off custom data fields.

            extrafields = {}

            for field, size in cset.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            # Some entries might be metadata-only updates.
            if b'revision' not in extrafields:
                continue

            data = extrafields[b'revision']

            yield (
                node,
                cset[b'parents'][0],
                cset[b'parents'][1],
                # Linknode is always itself for changesets.
                cset[b'node'],
                # We always send full revisions. So delta base is not set.
                nullid,
                mdiff.trivialdiffheader(len(data)) + data,
                # Flags not yet supported.
                0,
            )

    cl.addgroup(
        iterrevisions(),
        linkrev,
        weakref.proxy(tr),
        addrevisioncb=onchangeset,
        duplicaterevisioncb=ondupchangeset,
    )

    progress.complete()

    return {
        b'added': added,
        b'nodesbyphase': nodesbyphase,
        b'bookmarks': remotebookmarks,
        b'manifestnodes': manifestnodes,
    }

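For reference, the 7-tuple shape consumed by addgroup() and produced by the
iterrevisions() generators throughout this file, as a self-contained sketch
(placeholder values; the names are descriptive labels only):

nullid = b'\0' * 20  # stand-in for mercurial.node.nullid

node = b'\x11' * 20       # revision being added
p1, p2 = b'\x22' * 20, nullid
linknode = node           # changesets link to themselves on this path
deltabase = nullid        # full revision, so no delta parent
delta = b''               # mdiff data (trivial header + text in this file)
flags = 0                 # revlog flags; not yet supported here

revision_tuple = (node, p1, p2, linknode, deltabase, delta, flags)
assert len(revision_tuple) == 7
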
def _fetchmanifests(repo, tr, remote, manifestnodes):
    rootmanifest = repo.manifestlog.getstorage(b'')

    # Some manifests can be shared between changesets. Filter out revisions
    # we already know about.
    fetchnodes = []
    linkrevs = {}
    seen = set()

    for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
        if node in seen:
            continue

        try:
            rootmanifest.rev(node)
        except error.LookupError:
            fetchnodes.append(node)
            linkrevs[node] = clrev

        seen.add(node)

    # TODO handle tree manifests

    # addgroup() expects a 7-tuple describing revisions. This normalizes
    # the wire data to that format.
    def iterrevisions(objs, progress):
        for manifest in objs:
            node = manifest[b'node']

            extrafields = {}

            for field, size in manifest.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            if b'delta' in extrafields:
                basenode = manifest[b'deltabasenode']
                delta = extrafields[b'delta']
            elif b'revision' in extrafields:
                basenode = nullid
                revision = extrafields[b'revision']
                delta = mdiff.trivialdiffheader(len(revision)) + revision
            else:
                continue

            yield (
                node,
                manifest[b'parents'][0],
                manifest[b'parents'][1],
                # The value passed in is passed to the lookup function passed
                # to addgroup(). We already have a map of manifest node to
                # changelog revision number. So we just pass in the
                # manifest node here and use linkrevs.__getitem__ as the
                # resolution function.
                node,
                basenode,
                delta,
                # Flags not yet supported.
                0,
            )

            progress.increment()

    progress = repo.ui.makeprogress(
        _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
    )

    commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
    batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
    # TODO make size configurable on client?

    # We send commands 1 at a time to the remote. This is not the most
    # efficient because we incur a round trip at the end of each batch.
    # However, the existing frame-based reactor keeps consuming server
    # data in the background. And this results in response data buffering
    # in memory. This can consume gigabytes of memory.
    # TODO send multiple commands in a request once background buffering
    # issues are resolved.

    added = []

    for i in pycompat.xrange(0, len(fetchnodes), batchsize):
        batch = [node for node in fetchnodes[i : i + batchsize]]
        if not batch:
            continue

        with remote.commandexecutor() as e:
            objs = e.callcommand(
                b'manifestdata',
                {
                    b'tree': b'',
                    b'nodes': batch,
                    b'fields': {b'parents', b'revision'},
                    b'haveparents': True,
                },
            ).result()

            # Chomp off header object.
            next(objs)

            def onchangeset(cl, node):
                added.append(node)

            rootmanifest.addgroup(
                iterrevisions(objs, progress),
                linkrevs.__getitem__,
                weakref.proxy(tr),
                addrevisioncb=onchangeset,
                duplicaterevisioncb=onchangeset,
            )

    progress.complete()

    return {
        b'added': added,
        b'linkrevs': linkrevs,
    }

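The batch slicing used in _fetchmanifests() (and again for files below) is
plain list slicing; a toy check of the arithmetic:

fetchnodes = list(range(25))
batchsize = 10

batches = [
    fetchnodes[i : i + batchsize]
    for i in range(0, len(fetchnodes), batchsize)
]
assert [len(batch) for batch in batches] == [10, 10, 5]
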
def _derivefilesfrommanifests(repo, matcher, manifestnodes):
    """Determine what file nodes are relevant given a set of manifest nodes.

    Returns a dict mapping file paths to dicts of file node to first manifest
    node.
    """
    ml = repo.manifestlog
    fnodes = collections.defaultdict(dict)

    progress = repo.ui.makeprogress(
        _(b'scanning manifests'), total=len(manifestnodes)
    )

    with progress:
        for manifestnode in manifestnodes:
            m = ml.get(b'', manifestnode)

            # TODO this will pull in unwanted nodes because it takes the storage
            # delta into consideration. What we really want is something that
            # takes the delta between the manifest's parents. And ideally we
            # would ignore file nodes that are known locally. For now, ignore
            # both these limitations. This will result in incremental fetches
            # requesting data we already have. So this is far from ideal.
            md = m.readfast()

            for path, fnode in md.items():
                if matcher(path):
                    fnodes[path].setdefault(fnode, manifestnode)

            progress.increment()

    return fnodes

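The mapping returned by _derivefilesfrommanifests() has the shape sketched
below (hypothetical paths and nodes); setdefault() keeps the first manifest
that referenced each file node:

import collections

fnodes = collections.defaultdict(dict)
fnodes[b'src/a.py'].setdefault(b'\x01' * 20, b'\xaa' * 20)
fnodes[b'src/a.py'].setdefault(b'\x01' * 20, b'\xbb' * 20)  # ignored: seen

assert fnodes[b'src/a.py'][b'\x01' * 20] == b'\xaa' * 20
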
def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
    """Fetch file data from explicit file revisions."""

    def iterrevisions(objs, progress):
        for filerevision in objs:
            node = filerevision[b'node']

            extrafields = {}

            for field, size in filerevision.get(b'fieldsfollowing', []):
                extrafields[field] = next(objs)

            if b'delta' in extrafields:
                basenode = filerevision[b'deltabasenode']
                delta = extrafields[b'delta']
            elif b'revision' in extrafields:
                basenode = nullid
                revision = extrafields[b'revision']
                delta = mdiff.trivialdiffheader(len(revision)) + revision
            else:
                continue

            yield (
                node,
                filerevision[b'parents'][0],
                filerevision[b'parents'][1],
                node,
                basenode,
                delta,
                # Flags not yet supported.
                0,
            )

            progress.increment()

    progress = repo.ui.makeprogress(
        _(b'files'),
        unit=_(b'chunks'),
        total=sum(len(v) for v in pycompat.itervalues(fnodes)),
    )

    # TODO make batch size configurable
    batchsize = 10000
    fnodeslist = [x for x in sorted(fnodes.items())]

    for i in pycompat.xrange(0, len(fnodeslist), batchsize):
        batch = [x for x in fnodeslist[i : i + batchsize]]
        if not batch:
            continue

        with remote.commandexecutor() as e:
            fs = []
            locallinkrevs = {}

            for path, nodes in batch:
                fs.append(
                    (
                        path,
                        e.callcommand(
                            b'filedata',
                            {
                                b'path': path,
                                b'nodes': sorted(nodes),
                                b'fields': {b'parents', b'revision'},
                                b'haveparents': True,
                            },
                        ),
                    )
                )

                locallinkrevs[path] = {
                    node: linkrevs[manifestnode]
                    for node, manifestnode in pycompat.iteritems(nodes)
                }

            for path, f in fs:
                objs = f.result()

                # Chomp off header objects.
                next(objs)

                store = repo.file(path)
                store.addgroup(
                    iterrevisions(objs, progress),
                    locallinkrevs[path].__getitem__,
                    weakref.proxy(tr),
                )

679 def _fetchfilesfromcsets(
681 def _fetchfilesfromcsets(
680 repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
682 repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
681 ):
683 ):
682 """Fetch file data from explicit changeset revisions."""
684 """Fetch file data from explicit changeset revisions."""
683
685
684 def iterrevisions(objs, remaining, progress):
686 def iterrevisions(objs, remaining, progress):
685 while remaining:
687 while remaining:
686 filerevision = next(objs)
688 filerevision = next(objs)
687
689
688 node = filerevision[b'node']
690 node = filerevision[b'node']
689
691
690 extrafields = {}
692 extrafields = {}
691
693
692 for field, size in filerevision.get(b'fieldsfollowing', []):
694 for field, size in filerevision.get(b'fieldsfollowing', []):
693 extrafields[field] = next(objs)
695 extrafields[field] = next(objs)
694
696
695 if b'delta' in extrafields:
697 if b'delta' in extrafields:
696 basenode = filerevision[b'deltabasenode']
698 basenode = filerevision[b'deltabasenode']
697 delta = extrafields[b'delta']
699 delta = extrafields[b'delta']
698 elif b'revision' in extrafields:
700 elif b'revision' in extrafields:
699 basenode = nullid
701 basenode = nullid
700 revision = extrafields[b'revision']
702 revision = extrafields[b'revision']
701 delta = mdiff.trivialdiffheader(len(revision)) + revision
703 delta = mdiff.trivialdiffheader(len(revision)) + revision
702 else:
704 else:
703 continue
705 continue
704
706
705 if b'linknode' in filerevision:
707 if b'linknode' in filerevision:
706 linknode = filerevision[b'linknode']
708 linknode = filerevision[b'linknode']
707 else:
709 else:
708 linknode = node
710 linknode = node
709
711
710 yield (
712 yield (
711 node,
713 node,
712 filerevision[b'parents'][0],
714 filerevision[b'parents'][0],
713 filerevision[b'parents'][1],
715 filerevision[b'parents'][1],
714 linknode,
716 linknode,
715 basenode,
717 basenode,
716 delta,
718 delta,
717 # Flags not yet supported.
719 # Flags not yet supported.
718 0,
720 0,
719 )
721 )
720
722
721 progress.increment()
723 progress.increment()
722 remaining -= 1
724 remaining -= 1
723
725
724 progress = repo.ui.makeprogress(
726 progress = repo.ui.makeprogress(
725 _(b'files'),
727 _(b'files'),
726 unit=_(b'chunks'),
728 unit=_(b'chunks'),
727 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
729 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
728 )
730 )
729
731
730 commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
732 commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
731 batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
733 batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
732
734
733 shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
735 shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
734 fields = {b'parents', b'revision'}
736 fields = {b'parents', b'revision'}
735 clrev = repo.changelog.rev
737 clrev = repo.changelog.rev
736
738
737 # There are no guarantees that we'll have ancestor revisions if
739 # There are no guarantees that we'll have ancestor revisions if
738 # a) this repo has shallow file storage, or b) shallow data fetching is enabled.
740 # a) this repo has shallow file storage, or b) shallow data fetching is enabled.
739 # Force remote to not delta against possibly unknown revisions when these
741 # Force remote to not delta against possibly unknown revisions when these
740 # conditions hold.
742 # conditions hold.
741 haveparents = not (shallowfiles or shallow)
743 haveparents = not (shallowfiles or shallow)
742
744
743 # Similarly, we may not have calculated linkrevs for all incoming file
745 # Similarly, we may not have calculated linkrevs for all incoming file
744 # revisions. Ask the remote to do work for us in this case.
746 # revisions. Ask the remote to do work for us in this case.
745 if not haveparents:
747 if not haveparents:
746 fields.add(b'linknode')
748 fields.add(b'linknode')
747
749
748 for i in pycompat.xrange(0, len(csets), batchsize):
750 for i in pycompat.xrange(0, len(csets), batchsize):
749 batch = [x for x in csets[i : i + batchsize]]
751 batch = [x for x in csets[i : i + batchsize]]
750 if not batch:
752 if not batch:
751 continue
753 continue
752
754
753 with remote.commandexecutor() as e:
755 with remote.commandexecutor() as e:
754 args = {
756 args = {
755 b'revisions': [
757 b'revisions': [
756 {
758 {
757 b'type': b'changesetexplicit',
759 b'type': b'changesetexplicit',
758 b'nodes': batch,
760 b'nodes': batch,
759 }
761 }
760 ],
762 ],
761 b'fields': fields,
763 b'fields': fields,
762 b'haveparents': haveparents,
764 b'haveparents': haveparents,
763 }
765 }
764
766
765 if pathfilter:
767 if pathfilter:
766 args[b'pathfilter'] = pathfilter
768 args[b'pathfilter'] = pathfilter
767
769
768 objs = e.callcommand(b'filesdata', args).result()
770 objs = e.callcommand(b'filesdata', args).result()
769
771
770 # First object is an overall header.
772 # First object is an overall header.
771 overall = next(objs)
773 overall = next(objs)
772
774
773 # We have overall['totalpaths'] segments.
775 # We have overall['totalpaths'] segments.
774 for i in pycompat.xrange(overall[b'totalpaths']):
776 for i in pycompat.xrange(overall[b'totalpaths']):
775 header = next(objs)
777 header = next(objs)
776
778
777 path = header[b'path']
779 path = header[b'path']
778 store = repo.file(path)
780 store = repo.file(path)
779
781
780 linkrevs = {
782 linkrevs = {
781 fnode: manlinkrevs[mnode]
783 fnode: manlinkrevs[mnode]
782 for fnode, mnode in pycompat.iteritems(fnodes[path])
784 for fnode, mnode in pycompat.iteritems(fnodes[path])
783 }
785 }
784
786
785 def getlinkrev(node):
787 def getlinkrev(node):
786 if node in linkrevs:
788 if node in linkrevs:
787 return linkrevs[node]
789 return linkrevs[node]
788 else:
790 else:
789 return clrev(node)
791 return clrev(node)
790
792
791 store.addgroup(
793 store.addgroup(
792 iterrevisions(objs, header[b'totalitems'], progress),
794 iterrevisions(objs, header[b'totalitems'], progress),
793 getlinkrev,
795 getlinkrev,
794 weakref.proxy(tr),
796 weakref.proxy(tr),
795 maybemissingparents=shallow,
797 maybemissingparents=shallow,
796 )
798 )
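
# A minimal, self-contained sketch (plain Python, no Mercurial imports) of
# the batching pattern shared by both fetch helpers above: slice the node
# list into fixed-size batches and issue one wire command per batch.
# ``send_filesdata`` is a hypothetical stand-in for
# e.callcommand(b'filesdata', ...).result().
def fetch_in_batches(csets, batchsize, send_filesdata):
    results = []
    for i in range(0, len(csets), batchsize):
        batch = csets[i : i + batchsize]
        if not batch:
            continue
        # One request per batch bounds per-request argument size
        # (cf. ``recommendedbatchsize`` above).
        results.extend(send_filesdata(batch))
    return results
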
@@ -1,1979 +1,1987 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from ..i18n import _
10 from ..i18n import _
11 from .. import error
11 from .. import error
12 from . import util as interfaceutil
12 from . import util as interfaceutil
13
13
14 # Local repository feature string.
14 # Local repository feature string.
15
15
16 # Revlogs are being used for file storage.
16 # Revlogs are being used for file storage.
17 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
17 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
18 # The storage part of the repository is shared from an external source.
18 # The storage part of the repository is shared from an external source.
19 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
19 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
20 # LFS supported for backing file storage.
20 # LFS supported for backing file storage.
21 REPO_FEATURE_LFS = b'lfs'
21 REPO_FEATURE_LFS = b'lfs'
22 # Repository supports being stream cloned.
22 # Repository supports being stream cloned.
23 REPO_FEATURE_STREAM_CLONE = b'streamclone'
23 REPO_FEATURE_STREAM_CLONE = b'streamclone'
24 # Files storage may lack data for all ancestors.
24 # Files storage may lack data for all ancestors.
25 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
25 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
26
26
27 REVISION_FLAG_CENSORED = 1 << 15
27 REVISION_FLAG_CENSORED = 1 << 15
28 REVISION_FLAG_ELLIPSIS = 1 << 14
28 REVISION_FLAG_ELLIPSIS = 1 << 14
29 REVISION_FLAG_EXTSTORED = 1 << 13
29 REVISION_FLAG_EXTSTORED = 1 << 13
30 REVISION_FLAG_SIDEDATA = 1 << 12
30 REVISION_FLAG_SIDEDATA = 1 << 12
31 REVISION_FLAG_HASCOPIESINFO = 1 << 11
31 REVISION_FLAG_HASCOPIESINFO = 1 << 11
32
32
33 REVISION_FLAGS_KNOWN = (
33 REVISION_FLAGS_KNOWN = (
34 REVISION_FLAG_CENSORED
34 REVISION_FLAG_CENSORED
35 | REVISION_FLAG_ELLIPSIS
35 | REVISION_FLAG_ELLIPSIS
36 | REVISION_FLAG_EXTSTORED
36 | REVISION_FLAG_EXTSTORED
37 | REVISION_FLAG_SIDEDATA
37 | REVISION_FLAG_SIDEDATA
38 | REVISION_FLAG_HASCOPIESINFO
38 | REVISION_FLAG_HASCOPIESINFO
39 )
39 )
40
40
41 CG_DELTAMODE_STD = b'default'
41 CG_DELTAMODE_STD = b'default'
42 CG_DELTAMODE_PREV = b'previous'
42 CG_DELTAMODE_PREV = b'previous'
43 CG_DELTAMODE_FULL = b'fulltext'
43 CG_DELTAMODE_FULL = b'fulltext'
44 CG_DELTAMODE_P1 = b'p1'
44 CG_DELTAMODE_P1 = b'p1'
45
45
46
46
47 class ipeerconnection(interfaceutil.Interface):
47 class ipeerconnection(interfaceutil.Interface):
48 """Represents a "connection" to a repository.
48 """Represents a "connection" to a repository.
49
49
50 This is the base interface for representing a connection to a repository.
50 This is the base interface for representing a connection to a repository.
51 It holds basic properties and methods applicable to all peer types.
51 It holds basic properties and methods applicable to all peer types.
52
52
53 This is not a complete interface definition and should not be used
53 This is not a complete interface definition and should not be used
54 outside of this module.
54 outside of this module.
55 """
55 """
56
56
57 ui = interfaceutil.Attribute("""ui.ui instance""")
57 ui = interfaceutil.Attribute("""ui.ui instance""")
58
58
59 def url():
59 def url():
60 """Returns a URL string representing this peer.
60 """Returns a URL string representing this peer.
61
61
62 Currently, implementations expose the raw URL used to construct the
62 Currently, implementations expose the raw URL used to construct the
63 instance. It may contain credentials as part of the URL. The
63 instance. It may contain credentials as part of the URL. The
64 expectations of the value aren't well-defined and this could lead to
64 expectations of the value aren't well-defined and this could lead to
65 data leakage.
65 data leakage.
66
66
67 TODO audit/clean consumers and more clearly define the contents of this
67 TODO audit/clean consumers and more clearly define the contents of this
68 value.
68 value.
69 """
69 """
70
70
71 def local():
71 def local():
72 """Returns a local repository instance.
72 """Returns a local repository instance.
73
73
74 If the peer represents a local repository, returns an object that
74 If the peer represents a local repository, returns an object that
75 can be used to interface with it. Otherwise returns ``None``.
75 can be used to interface with it. Otherwise returns ``None``.
76 """
76 """
77
77
78 def peer():
78 def peer():
79 """Returns an object conforming to this interface.
79 """Returns an object conforming to this interface.
80
80
81 Most implementations will ``return self``.
81 Most implementations will ``return self``.
82 """
82 """
83
83
84 def canpush():
84 def canpush():
85 """Returns a boolean indicating if this peer can be pushed to."""
85 """Returns a boolean indicating if this peer can be pushed to."""
86
86
87 def close():
87 def close():
88 """Close the connection to this peer.
88 """Close the connection to this peer.
89
89
90 This is called when the peer will no longer be used. Resources
90 This is called when the peer will no longer be used. Resources
91 associated with the peer should be cleaned up.
91 associated with the peer should be cleaned up.
92 """
92 """
93
93
94
94
95 class ipeercapabilities(interfaceutil.Interface):
95 class ipeercapabilities(interfaceutil.Interface):
96 """Peer sub-interface related to capabilities."""
96 """Peer sub-interface related to capabilities."""
97
97
98 def capable(name):
98 def capable(name):
99 """Determine support for a named capability.
99 """Determine support for a named capability.
100
100
101 Returns ``False`` if capability not supported.
101 Returns ``False`` if capability not supported.
102
102
103 Returns ``True`` if boolean capability is supported. Returns a string
103 Returns ``True`` if boolean capability is supported. Returns a string
104 if capability support is non-boolean.
104 if capability support is non-boolean.
105
105
106 Capability strings may or may not map to wire protocol capabilities.
106 Capability strings may or may not map to wire protocol capabilities.
107 """
107 """
108
108
109 def requirecap(name, purpose):
109 def requirecap(name, purpose):
110 """Require a capability to be present.
110 """Require a capability to be present.
111
111
112 Raises a ``CapabilityError`` if the capability isn't present.
112 Raises a ``CapabilityError`` if the capability isn't present.
113 """
113 """
114
114
115
115
116 class ipeercommands(interfaceutil.Interface):
116 class ipeercommands(interfaceutil.Interface):
117 """Client-side interface for communicating over the wire protocol.
117 """Client-side interface for communicating over the wire protocol.
118
118
119 This interface is used as a gateway to the Mercurial wire protocol.
119 This interface is used as a gateway to the Mercurial wire protocol.
120 Methods commonly call wire protocol commands of the same name.
120 Methods commonly call wire protocol commands of the same name.
121 """
121 """
122
122
123 def branchmap():
123 def branchmap():
124 """Obtain heads in named branches.
124 """Obtain heads in named branches.
125
125
126 Returns a dict mapping branch name to an iterable of nodes that are
126 Returns a dict mapping branch name to an iterable of nodes that are
127 heads on that branch.
127 heads on that branch.
128 """
128 """
129
129
130 def capabilities():
130 def capabilities():
131 """Obtain capabilities of the peer.
131 """Obtain capabilities of the peer.
132
132
133 Returns a set of string capabilities.
133 Returns a set of string capabilities.
134 """
134 """
135
135
136 def clonebundles():
136 def clonebundles():
137 """Obtains the clone bundles manifest for the repo.
137 """Obtains the clone bundles manifest for the repo.
138
138
139 Returns the manifest as unparsed bytes.
139 Returns the manifest as unparsed bytes.
140 """
140 """
141
141
142 def debugwireargs(one, two, three=None, four=None, five=None):
142 def debugwireargs(one, two, three=None, four=None, five=None):
143 """Used to facilitate debugging of arguments passed over the wire."""
143 """Used to facilitate debugging of arguments passed over the wire."""
144
144
145 def getbundle(source, **kwargs):
145 def getbundle(source, **kwargs):
146 """Obtain remote repository data as a bundle.
146 """Obtain remote repository data as a bundle.
147
147
148 This command is how the bulk of repository data is transferred from
148 This command is how the bulk of repository data is transferred from
149 the peer to the local repository.
149 the peer to the local repository.
150
150
151 Returns a generator of bundle data.
151 Returns a generator of bundle data.
152 """
152 """
153
153
154 def heads():
154 def heads():
155 """Determine all known head revisions in the peer.
155 """Determine all known head revisions in the peer.
156
156
157 Returns an iterable of binary nodes.
157 Returns an iterable of binary nodes.
158 """
158 """
159
159
160 def known(nodes):
160 def known(nodes):
161 """Determine whether multiple nodes are known.
161 """Determine whether multiple nodes are known.
162
162
163 Accepts an iterable of nodes whose presence to check for.
163 Accepts an iterable of nodes whose presence to check for.
164
164
165 Returns an iterable of booleans indicating whether the corresponding node
165 Returns an iterable of booleans indicating whether the corresponding node
166 at that index is known to the peer.
166 at that index is known to the peer.
167 """
167 """
168
168
169 def listkeys(namespace):
169 def listkeys(namespace):
170 """Obtain all keys in a pushkey namespace.
170 """Obtain all keys in a pushkey namespace.
171
171
172 Returns an iterable of key names.
172 Returns an iterable of key names.
173 """
173 """
174
174
175 def lookup(key):
175 def lookup(key):
176 """Resolve a value to a known revision.
176 """Resolve a value to a known revision.
177
177
178 Returns a binary node of the resolved revision on success.
178 Returns a binary node of the resolved revision on success.
179 """
179 """
180
180
181 def pushkey(namespace, key, old, new):
181 def pushkey(namespace, key, old, new):
182 """Set a value using the ``pushkey`` protocol.
182 """Set a value using the ``pushkey`` protocol.
183
183
184 Arguments correspond to the pushkey namespace and key to operate on and
184 Arguments correspond to the pushkey namespace and key to operate on and
185 the old and new values for that key.
185 the old and new values for that key.
186
186
187 Returns a string with the peer result. The value inside varies by the
187 Returns a string with the peer result. The value inside varies by the
188 namespace.
188 namespace.
189 """
189 """
190
190
191 def stream_out():
191 def stream_out():
192 """Obtain streaming clone data.
192 """Obtain streaming clone data.
193
193
194 Successful result should be a generator of data chunks.
194 Successful result should be a generator of data chunks.
195 """
195 """
196
196
197 def unbundle(bundle, heads, url):
197 def unbundle(bundle, heads, url):
198 """Transfer repository data to the peer.
198 """Transfer repository data to the peer.
199
199
200 This is how the bulk of data during a push is transferred.
200 This is how the bulk of data during a push is transferred.
201
201
202 Returns the integer number of heads added to the peer.
202 Returns the integer number of heads added to the peer.
203 """
203 """
204
204
205
205
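
# A hedged usage sketch combining two of the commands above; ``remotepeer``
# is assumed to implement ipeercommands. lookup() resolves a symbolic
# revision to a binary node and known() reports presence per queried node.
def remote_has(remotepeer, symbol):
    node = remotepeer.lookup(symbol)
    return all(remotepeer.known([node]))
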
206 class ipeerlegacycommands(interfaceutil.Interface):
206 class ipeerlegacycommands(interfaceutil.Interface):
207 """Interface for implementing support for legacy wire protocol commands.
207 """Interface for implementing support for legacy wire protocol commands.
208
208
209 Wire protocol commands transition to legacy status when they are no longer
209 Wire protocol commands transition to legacy status when they are no longer
210 used by modern clients. To facilitate identifying which commands are
210 used by modern clients. To facilitate identifying which commands are
211 legacy, the interfaces are split.
211 legacy, the interfaces are split.
212 """
212 """
213
213
214 def between(pairs):
214 def between(pairs):
215 """Obtain nodes between pairs of nodes.
215 """Obtain nodes between pairs of nodes.
216
216
217 ``pairs`` is an iterable of node pairs.
217 ``pairs`` is an iterable of node pairs.
218
218
219 Returns an iterable of iterables of nodes corresponding to each
219 Returns an iterable of iterables of nodes corresponding to each
220 requested pair.
220 requested pair.
221 """
221 """
222
222
223 def branches(nodes):
223 def branches(nodes):
224 """Obtain ancestor changesets of specific nodes back to a branch point.
224 """Obtain ancestor changesets of specific nodes back to a branch point.
225
225
226 For each requested node, the peer finds the first ancestor node that is
226 For each requested node, the peer finds the first ancestor node that is
227 a DAG root or is a merge.
227 a DAG root or is a merge.
228
228
229 Returns an iterable of iterables with the resolved values for each node.
229 Returns an iterable of iterables with the resolved values for each node.
230 """
230 """
231
231
232 def changegroup(nodes, source):
232 def changegroup(nodes, source):
233 """Obtain a changegroup with data for descendants of specified nodes."""
233 """Obtain a changegroup with data for descendants of specified nodes."""
234
234
235 def changegroupsubset(bases, heads, source):
235 def changegroupsubset(bases, heads, source):
236 """Obtain a changegroup with data for revisions between ``bases`` and ``heads``."""
236 """Obtain a changegroup with data for revisions between ``bases`` and ``heads``."""
237
237
238
238
239 class ipeercommandexecutor(interfaceutil.Interface):
239 class ipeercommandexecutor(interfaceutil.Interface):
240 """Represents a mechanism to execute remote commands.
240 """Represents a mechanism to execute remote commands.
241
241
242 This is the primary interface for requesting that wire protocol commands
242 This is the primary interface for requesting that wire protocol commands
243 be executed. Instances of this interface are active in a context manager
243 be executed. Instances of this interface are active in a context manager
244 and have a well-defined lifetime. When the context manager exits, all
244 and have a well-defined lifetime. When the context manager exits, all
245 outstanding requests are waited on.
245 outstanding requests are waited on.
246 """
246 """
247
247
248 def callcommand(name, args):
248 def callcommand(name, args):
249 """Request that a named command be executed.
249 """Request that a named command be executed.
250
250
251 Receives the command name and a dictionary of command arguments.
251 Receives the command name and a dictionary of command arguments.
252
252
253 Returns a ``concurrent.futures.Future`` that will resolve to the
253 Returns a ``concurrent.futures.Future`` that will resolve to the
254 result of that command request. That exact value is left up to
254 result of that command request. That exact value is left up to
255 the implementation and possibly varies by command.
255 the implementation and possibly varies by command.
256
256
257 Not all commands can coexist with other commands in an executor
257 Not all commands can coexist with other commands in an executor
258 instance: it depends on the underlying wire protocol transport being
258 instance: it depends on the underlying wire protocol transport being
259 used and the command itself.
259 used and the command itself.
260
260
261 Implementations MAY call ``sendcommands()`` automatically if the
261 Implementations MAY call ``sendcommands()`` automatically if the
262 requested command cannot coexist with other commands in this executor.
262 requested command cannot coexist with other commands in this executor.
263
263
264 Implementations MAY call ``sendcommands()`` automatically when the
264 Implementations MAY call ``sendcommands()`` automatically when the
265 future's ``result()`` is called. So, consumers using multiple
265 future's ``result()`` is called. So, consumers using multiple
266 commands with an executor MUST ensure that ``result()`` is not called
266 commands with an executor MUST ensure that ``result()`` is not called
267 until all command requests have been issued.
267 until all command requests have been issued.
268 """
268 """
269
269
270 def sendcommands():
270 def sendcommands():
271 """Trigger submission of queued command requests.
271 """Trigger submission of queued command requests.
272
272
273 Not all transports submit commands as soon as they are requested to
273 Not all transports submit commands as soon as they are requested to
274 run. When called, this method forces queued command requests to be
274 run. When called, this method forces queued command requests to be
275 issued. It will no-op if all commands have already been sent.
275 issued. It will no-op if all commands have already been sent.
276
276
277 When called, no more new commands may be issued with this executor.
277 When called, no more new commands may be issued with this executor.
278 """
278 """
279
279
280 def close():
280 def close():
281 """Signal that this command request is finished.
281 """Signal that this command request is finished.
282
282
283 When called, no more new commands may be issued. All outstanding
283 When called, no more new commands may be issued. All outstanding
284 commands that have previously been issued are waited on before
284 commands that have previously been issued are waited on before
285 returning. This not only includes waiting for the futures to resolve,
285 returning. This not only includes waiting for the futures to resolve,
286 but also waiting for all response data to arrive. In other words,
286 but also waiting for all response data to arrive. In other words,
287 calling this waits for all on-wire state for issued command requests
287 calling this waits for all on-wire state for issued command requests
288 to finish.
288 to finish.
289
289
290 When used as a context manager, this method is called when exiting the
290 When used as a context manager, this method is called when exiting the
291 context manager.
291 context manager.
292
292
293 This method may call ``sendcommands()`` if there are buffered commands.
293 This method may call ``sendcommands()`` if there are buffered commands.
294 """
294 """
295
295
296
296
297 class ipeerrequests(interfaceutil.Interface):
297 class ipeerrequests(interfaceutil.Interface):
298 """Interface for executing commands on a peer."""
298 """Interface for executing commands on a peer."""
299
299
300 limitedarguments = interfaceutil.Attribute(
300 limitedarguments = interfaceutil.Attribute(
301 """True if the peer cannot receive large argument value for commands."""
301 """True if the peer cannot receive large argument value for commands."""
302 )
302 )
303
303
304 def commandexecutor():
304 def commandexecutor():
305 """A context manager that resolves to an ipeercommandexecutor.
305 """A context manager that resolves to an ipeercommandexecutor.
306
306
307 The object this resolves to can be used to issue command requests
307 The object this resolves to can be used to issue command requests
308 to the peer.
308 to the peer.
309
309
310 Callers should call its ``callcommand`` method to issue command
310 Callers should call its ``callcommand`` method to issue command
311 requests.
311 requests.
312
312
313 A new executor should be obtained for each distinct set of commands
313 A new executor should be obtained for each distinct set of commands
314 (possibly just a single command) that the consumer wants to execute
314 (possibly just a single command) that the consumer wants to execute
315 as part of a single operation or round trip. This is because some
315 as part of a single operation or round trip. This is because some
316 peers are half-duplex and/or don't support persistent connections.
316 peers are half-duplex and/or don't support persistent connections.
317 e.g. in the case of HTTP peers, commands sent to an executor represent
317 e.g. in the case of HTTP peers, commands sent to an executor represent
318 a single HTTP request. While some peers may support multiple command
318 a single HTTP request. While some peers may support multiple command
319 sends over the wire per executor, consumers need to code to the least
319 sends over the wire per executor, consumers need to code to the least
320 capable peer. So it should be assumed that command executors buffer
320 capable peer. So it should be assumed that command executors buffer
321 called commands until they are told to send them and that each
321 called commands until they are told to send them and that each
322 command executor could result in a new connection or wire-level request
322 command executor could result in a new connection or wire-level request
323 being issued.
323 being issued.
324 """
324 """
325
325
326
326
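
# A hedged sketch of the executor contract described above, mirroring the
# changegroup.py hunks earlier in this diff: issue every callcommand()
# request first, then resolve the futures, since result() MAY flush the
# executor and prevent further command submission.
def fetch_heads_and_branchmap(remote):
    with remote.commandexecutor() as e:
        fheads = e.callcommand(b'heads', {})
        fbranchmap = e.callcommand(b'branchmap', {})
        # Both requests are now issued; resolving the futures is safe.
        return fheads.result(), fbranchmap.result()
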
327 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
327 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
328 """Unified interface for peer repositories.
328 """Unified interface for peer repositories.
329
329
330 All peer instances must conform to this interface.
330 All peer instances must conform to this interface.
331 """
331 """
332
332
333
333
334 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
334 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
335 """Unified peer interface for wire protocol version 2 peers."""
335 """Unified peer interface for wire protocol version 2 peers."""
336
336
337 apidescriptor = interfaceutil.Attribute(
337 apidescriptor = interfaceutil.Attribute(
338 """Data structure holding description of server API."""
338 """Data structure holding description of server API."""
339 )
339 )
340
340
341
341
342 @interfaceutil.implementer(ipeerbase)
342 @interfaceutil.implementer(ipeerbase)
343 class peer(object):
343 class peer(object):
344 """Base class for peer repositories."""
344 """Base class for peer repositories."""
345
345
346 limitedarguments = False
346 limitedarguments = False
347
347
348 def capable(self, name):
348 def capable(self, name):
349 caps = self.capabilities()
349 caps = self.capabilities()
350 if name in caps:
350 if name in caps:
351 return True
351 return True
352
352
353 name = b'%s=' % name
353 name = b'%s=' % name
354 for cap in caps:
354 for cap in caps:
355 if cap.startswith(name):
355 if cap.startswith(name):
356 return cap[len(name) :]
356 return cap[len(name) :]
357
357
358 return False
358 return False
359
359
360 def requirecap(self, name, purpose):
360 def requirecap(self, name, purpose):
361 if self.capable(name):
361 if self.capable(name):
362 return
362 return
363
363
364 raise error.CapabilityError(
364 raise error.CapabilityError(
365 _(
365 _(
366 b'cannot %s; remote repository does not support the '
366 b'cannot %s; remote repository does not support the '
367 b'\'%s\' capability'
367 b'\'%s\' capability'
368 )
368 )
369 % (purpose, name)
369 % (purpose, name)
370 )
370 )
371
371
372
372
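
# A small sketch of the three possible returns of capable() as implemented
# above. ``FakePeer`` is hypothetical and only overrides capabilities().
class FakePeer(peer):
    def capabilities(self):
        return {b'branchmap', b'bundle2=HG20'}

_p = FakePeer()
assert _p.capable(b'branchmap') is True    # boolean capability
assert _p.capable(b'bundle2') == b'HG20'   # non-boolean: value after '='
assert _p.capable(b'getbundle') is False   # capability absent
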
373 class iverifyproblem(interfaceutil.Interface):
373 class iverifyproblem(interfaceutil.Interface):
374 """Represents a problem with the integrity of the repository.
374 """Represents a problem with the integrity of the repository.
375
375
376 Instances of this interface are emitted to describe an integrity issue
376 Instances of this interface are emitted to describe an integrity issue
377 with a repository (e.g. corrupt storage, missing data, etc).
377 with a repository (e.g. corrupt storage, missing data, etc).
378
378
379 Instances are essentially messages associated with severity.
379 Instances are essentially messages associated with severity.
380 """
380 """
381
381
382 warning = interfaceutil.Attribute(
382 warning = interfaceutil.Attribute(
383 """Message indicating a non-fatal problem."""
383 """Message indicating a non-fatal problem."""
384 )
384 )
385
385
386 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
386 error = interfaceutil.Attribute("""Message indicating a fatal problem.""")
387
387
388 node = interfaceutil.Attribute(
388 node = interfaceutil.Attribute(
389 """Revision encountering the problem.
389 """Revision encountering the problem.
390
390
391 ``None`` means the problem doesn't apply to a single revision.
391 ``None`` means the problem doesn't apply to a single revision.
392 """
392 """
393 )
393 )
394
394
395
395
396 class irevisiondelta(interfaceutil.Interface):
396 class irevisiondelta(interfaceutil.Interface):
397 """Represents a delta between one revision and another.
397 """Represents a delta between one revision and another.
398
398
399 Instances convey enough information to allow a revision to be exchanged
399 Instances convey enough information to allow a revision to be exchanged
400 with another repository.
400 with another repository.
401
401
402 Instances represent the fulltext revision data or a delta against
402 Instances represent the fulltext revision data or a delta against
403 another revision. Therefore the ``revision`` and ``delta`` attributes
403 another revision. Therefore the ``revision`` and ``delta`` attributes
404 are mutually exclusive.
404 are mutually exclusive.
405
405
406 Typically used for changegroup generation.
406 Typically used for changegroup generation.
407 """
407 """
408
408
409 node = interfaceutil.Attribute("""20 byte node of this revision.""")
409 node = interfaceutil.Attribute("""20 byte node of this revision.""")
410
410
411 p1node = interfaceutil.Attribute(
411 p1node = interfaceutil.Attribute(
412 """20 byte node of 1st parent of this revision."""
412 """20 byte node of 1st parent of this revision."""
413 )
413 )
414
414
415 p2node = interfaceutil.Attribute(
415 p2node = interfaceutil.Attribute(
416 """20 byte node of 2nd parent of this revision."""
416 """20 byte node of 2nd parent of this revision."""
417 )
417 )
418
418
419 linknode = interfaceutil.Attribute(
419 linknode = interfaceutil.Attribute(
420 """20 byte node of the changelog revision this node is linked to."""
420 """20 byte node of the changelog revision this node is linked to."""
421 )
421 )
422
422
423 flags = interfaceutil.Attribute(
423 flags = interfaceutil.Attribute(
424 """2 bytes of integer flags that apply to this revision.
424 """2 bytes of integer flags that apply to this revision.
425
425
426 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
426 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
427 """
427 """
428 )
428 )
429
429
430 basenode = interfaceutil.Attribute(
430 basenode = interfaceutil.Attribute(
431 """20 byte node of the revision this data is a delta against.
431 """20 byte node of the revision this data is a delta against.
432
432
433 ``nullid`` indicates that the revision is a full revision and not
433 ``nullid`` indicates that the revision is a full revision and not
434 a delta.
434 a delta.
435 """
435 """
436 )
436 )
437
437
438 baserevisionsize = interfaceutil.Attribute(
438 baserevisionsize = interfaceutil.Attribute(
439 """Size of base revision this delta is against.
439 """Size of base revision this delta is against.
440
440
441 May be ``None`` if ``basenode`` is ``nullid``.
441 May be ``None`` if ``basenode`` is ``nullid``.
442 """
442 """
443 )
443 )
444
444
445 revision = interfaceutil.Attribute(
445 revision = interfaceutil.Attribute(
446 """Raw fulltext of revision data for this node."""
446 """Raw fulltext of revision data for this node."""
447 )
447 )
448
448
449 delta = interfaceutil.Attribute(
449 delta = interfaceutil.Attribute(
450 """Delta between ``basenode`` and ``node``.
450 """Delta between ``basenode`` and ``node``.
451
451
452 Stored in the bdiff delta format.
452 Stored in the bdiff delta format.
453 """
453 """
454 )
454 )
455
455
456
456
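
# A hedged sketch of normalizing the mutually exclusive ``revision`` /
# ``delta`` attributes into delta form, matching the pattern used in
# changegroup.py earlier in this diff: a fulltext becomes a full-insert
# delta against the null base. The header encoding mirrors
# mdiff.trivialdiffheader(): "replace bytes [0, 0) with ``length`` bytes".
import struct

_NULLID = b'\0' * 20

def asdelta(rdelta):
    if rdelta.delta is not None:
        return rdelta.basenode, rdelta.delta
    revision = rdelta.revision
    header = struct.pack(b'>lll', 0, 0, len(revision))
    return _NULLID, header + revision
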
457 class ifilerevisionssequence(interfaceutil.Interface):
457 class ifilerevisionssequence(interfaceutil.Interface):
458 """Contains index data for all revisions of a file.
458 """Contains index data for all revisions of a file.
459
459
460 Types implementing this behave like lists of tuples. The index
460 Types implementing this behave like lists of tuples. The index
461 in the list corresponds to the revision number. The values contain
461 in the list corresponds to the revision number. The values contain
462 index metadata.
462 index metadata.
463
463
464 The *null* revision (revision number -1) is always the last item
464 The *null* revision (revision number -1) is always the last item
465 in the index.
465 in the index.
466 """
466 """
467
467
468 def __len__():
468 def __len__():
469 """The total number of revisions."""
469 """The total number of revisions."""
470
470
471 def __getitem__(rev):
471 def __getitem__(rev):
472 """Returns the object having a specific revision number.
472 """Returns the object having a specific revision number.
473
473
474 Returns an 8-tuple with the following fields:
474 Returns an 8-tuple with the following fields:
475
475
476 offset+flags
476 offset+flags
477 Contains the offset and flags for the revision. 64-bit unsigned
477 Contains the offset and flags for the revision. 64-bit unsigned
478 integer where first 6 bytes are the offset and the next 2 bytes
478 integer where first 6 bytes are the offset and the next 2 bytes
479 are flags. The offset can be 0 if it is not used by the store.
479 are flags. The offset can be 0 if it is not used by the store.
480 compressed size
480 compressed size
481 Size of the revision data in the store. It can be 0 if it isn't
481 Size of the revision data in the store. It can be 0 if it isn't
482 needed by the store.
482 needed by the store.
483 uncompressed size
483 uncompressed size
484 Fulltext size. It can be 0 if it isn't needed by the store.
484 Fulltext size. It can be 0 if it isn't needed by the store.
485 base revision
485 base revision
486 Revision number of revision the delta for storage is encoded
486 Revision number of revision the delta for storage is encoded
487 against. -1 indicates not encoded against a base revision.
487 against. -1 indicates not encoded against a base revision.
488 link revision
488 link revision
489 Revision number of changelog revision this entry is related to.
489 Revision number of changelog revision this entry is related to.
490 p1 revision
490 p1 revision
491 Revision number of 1st parent. -1 if no 1st parent.
491 Revision number of 1st parent. -1 if no 1st parent.
492 p2 revision
492 p2 revision
493 Revision number of 2nd parent. -1 if no 2nd parent.
493 Revision number of 2nd parent. -1 if no 2nd parent.
494 node
494 node
495 Binary node value for this revision number.
495 Binary node value for this revision number.
496
496
497 Negative values should index off the end of the sequence. ``-1``
497 Negative values should index off the end of the sequence. ``-1``
498 should return the null revision. ``-2`` should return the most
498 should return the null revision. ``-2`` should return the most
499 recent revision.
499 recent revision.
500 """
500 """
501
501
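
# A small sketch unpacking the 8-tuple documented above; ``index`` is any
# ifilerevisionssequence. The first field packs a 48-bit offset with
# 16 bits of flags, so shifting and masking recover the two halves.
def describe(index, rev):
    (offset_flags, compsize, rawsize, baserev,
     linkrev, p1rev, p2rev, node) = index[rev]
    return {
        b'offset': offset_flags >> 16,
        b'flags': offset_flags & 0xFFFF,
        b'linkrev': linkrev,
        b'node': node,
    }
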
502 def __contains__(rev):
502 def __contains__(rev):
503 """Whether a revision number exists."""
503 """Whether a revision number exists."""
504
504
505 def insert(self, i, entry):
505 def insert(self, i, entry):
506 """Add an item to the index at specific revision."""
506 """Add an item to the index at specific revision."""
507
507
508
508
509 class ifileindex(interfaceutil.Interface):
509 class ifileindex(interfaceutil.Interface):
510 """Storage interface for index data of a single file.
510 """Storage interface for index data of a single file.
511
511
512 File storage data is divided into index metadata and data storage.
512 File storage data is divided into index metadata and data storage.
513 This interface defines the index portion of the interface.
513 This interface defines the index portion of the interface.
514
514
515 The index logically consists of:
515 The index logically consists of:
516
516
517 * A mapping between revision numbers and nodes.
517 * A mapping between revision numbers and nodes.
518 * DAG data (storing and querying the relationship between nodes).
518 * DAG data (storing and querying the relationship between nodes).
519 * Metadata to facilitate storage.
519 * Metadata to facilitate storage.
520 """
520 """
521
521
522 def __len__():
522 def __len__():
523 """Obtain the number of revisions stored for this file."""
523 """Obtain the number of revisions stored for this file."""
524
524
525 def __iter__():
525 def __iter__():
526 """Iterate over revision numbers for this file."""
526 """Iterate over revision numbers for this file."""
527
527
528 def hasnode(node):
528 def hasnode(node):
529 """Returns a bool indicating if a node is known to this store.
529 """Returns a bool indicating if a node is known to this store.
530
530
531 Implementations must only return True for full, binary node values:
531 Implementations must only return True for full, binary node values:
532 hex nodes, revision numbers, and partial node matches must be
532 hex nodes, revision numbers, and partial node matches must be
533 rejected.
533 rejected.
534
534
535 The null node is never present.
535 The null node is never present.
536 """
536 """
537
537
538 def revs(start=0, stop=None):
538 def revs(start=0, stop=None):
539 """Iterate over revision numbers for this file, with control."""
539 """Iterate over revision numbers for this file, with control."""
540
540
541 def parents(node):
541 def parents(node):
542 """Returns a 2-tuple of parent nodes for a revision.
542 """Returns a 2-tuple of parent nodes for a revision.
543
543
544 Values will be ``nullid`` if the parent is empty.
544 Values will be ``nullid`` if the parent is empty.
545 """
545 """
546
546
547 def parentrevs(rev):
547 def parentrevs(rev):
548 """Like parents() but operates on revision numbers."""
548 """Like parents() but operates on revision numbers."""
549
549
550 def rev(node):
550 def rev(node):
551 """Obtain the revision number given a node.
551 """Obtain the revision number given a node.
552
552
553 Raises ``error.LookupError`` if the node is not known.
553 Raises ``error.LookupError`` if the node is not known.
554 """
554 """
555
555
556 def node(rev):
556 def node(rev):
557 """Obtain the node value given a revision number.
557 """Obtain the node value given a revision number.
558
558
559 Raises ``IndexError`` if the node is not known.
559 Raises ``IndexError`` if the node is not known.
560 """
560 """
561
561
562 def lookup(node):
562 def lookup(node):
563 """Attempt to resolve a value to a node.
563 """Attempt to resolve a value to a node.
564
564
565 Value can be a binary node, hex node, revision number, or a string
565 Value can be a binary node, hex node, revision number, or a string
566 that can be converted to an integer.
566 that can be converted to an integer.
567
567
568 Raises ``error.LookupError`` if a node could not be resolved.
568 Raises ``error.LookupError`` if a node could not be resolved.
569 """
569 """
570
570
571 def linkrev(rev):
571 def linkrev(rev):
572 """Obtain the changeset revision number a revision is linked to."""
572 """Obtain the changeset revision number a revision is linked to."""
573
573
574 def iscensored(rev):
574 def iscensored(rev):
575 """Return whether a revision's content has been censored."""
575 """Return whether a revision's content has been censored."""
576
576
577 def commonancestorsheads(node1, node2):
577 def commonancestorsheads(node1, node2):
578 """Obtain an iterable of nodes containing heads of common ancestors.
578 """Obtain an iterable of nodes containing heads of common ancestors.
579
579
580 See ``ancestor.commonancestorsheads()``.
580 See ``ancestor.commonancestorsheads()``.
581 """
581 """
582
582
583 def descendants(revs):
583 def descendants(revs):
584 """Obtain descendant revision numbers for a set of revision numbers.
584 """Obtain descendant revision numbers for a set of revision numbers.
585
585
586 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
586 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
587 """
587 """
588
588
589 def heads(start=None, stop=None):
589 def heads(start=None, stop=None):
590 """Obtain a list of nodes that are DAG heads, with control.
590 """Obtain a list of nodes that are DAG heads, with control.
591
591
592 The set of revisions examined can be limited by specifying
592 The set of revisions examined can be limited by specifying
593 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
593 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
594 iterable of nodes. DAG traversal starts at earlier revision
594 iterable of nodes. DAG traversal starts at earlier revision
595 ``start`` and iterates forward until any node in ``stop`` is
595 ``start`` and iterates forward until any node in ``stop`` is
596 encountered.
596 encountered.
597 """
597 """
598
598
599 def children(node):
599 def children(node):
600 """Obtain nodes that are children of a node.
600 """Obtain nodes that are children of a node.
601
601
602 Returns a list of nodes.
602 Returns a list of nodes.
603 """
603 """
604
604
605
605
606 class ifiledata(interfaceutil.Interface):
606 class ifiledata(interfaceutil.Interface):
607 """Storage interface for data storage of a specific file.
607 """Storage interface for data storage of a specific file.
608
608
609 This complements ``ifileindex`` and provides an interface for accessing
609 This complements ``ifileindex`` and provides an interface for accessing
610 data for a tracked file.
610 data for a tracked file.
611 """
611 """
612
612
613 def size(rev):
613 def size(rev):
614 """Obtain the fulltext size of file data.
614 """Obtain the fulltext size of file data.
615
615
616 Any metadata is excluded from size measurements.
616 Any metadata is excluded from size measurements.
617 """
617 """
618
618
619 def revision(node, raw=False):
619 def revision(node, raw=False):
620 """Obtain fulltext data for a node.
620 """Obtain fulltext data for a node.
621
621
622 By default, any storage transformations are applied before the data
622 By default, any storage transformations are applied before the data
623 is returned. If ``raw`` is True, non-raw storage transformations
623 is returned. If ``raw`` is True, non-raw storage transformations
624 are not applied.
624 are not applied.
625
625
626 The fulltext data may contain a header containing metadata. Most
626 The fulltext data may contain a header containing metadata. Most
627 consumers should use ``read()`` to obtain the actual file data.
627 consumers should use ``read()`` to obtain the actual file data.
628 """
628 """
629
629
630 def rawdata(node):
630 def rawdata(node):
631 """Obtain raw data for a node."""
631 """Obtain raw data for a node."""
632
632
633 def read(node):
633 def read(node):
634 """Resolve file fulltext data.
634 """Resolve file fulltext data.
635
635
636 This is similar to ``revision()`` except any metadata in the data
636 This is similar to ``revision()`` except any metadata in the data
637 headers is stripped.
637 headers is stripped.
638 """
638 """
639
639
640 def renamed(node):
640 def renamed(node):
641 """Obtain copy metadata for a node.
641 """Obtain copy metadata for a node.
642
642
643 Returns ``False`` if no copy metadata is stored or a 2-tuple of
643 Returns ``False`` if no copy metadata is stored or a 2-tuple of
644 (path, node) from which this revision was copied.
644 (path, node) from which this revision was copied.
645 """
645 """
646
646
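
# A small sketch of consuming renamed() per the contract above: ``False``
# means no copy metadata is stored; otherwise a (path, node) 2-tuple
# identifies the copy source.
def copysource(store, node):
    meta = store.renamed(node)
    if not meta:
        return None
    srcpath, srcnode = meta
    return srcpath, srcnode
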
647 def cmp(node, fulltext):
647 def cmp(node, fulltext):
648 """Compare fulltext to another revision.
648 """Compare fulltext to another revision.
649
649
650 Returns True if the fulltext is different from what is stored.
650 Returns True if the fulltext is different from what is stored.
651
651
652 This takes copy metadata into account.
652 This takes copy metadata into account.
653
653
654 TODO better document the copy metadata and censoring logic.
654 TODO better document the copy metadata and censoring logic.
655 """
655 """
656
656
657 def emitrevisions(
657 def emitrevisions(
658 nodes,
658 nodes,
659 nodesorder=None,
659 nodesorder=None,
660 revisiondata=False,
660 revisiondata=False,
661 assumehaveparentrevisions=False,
661 assumehaveparentrevisions=False,
662 deltamode=CG_DELTAMODE_STD,
662 deltamode=CG_DELTAMODE_STD,
663 ):
663 ):
664 """Produce ``irevisiondelta`` for revisions.
664 """Produce ``irevisiondelta`` for revisions.
665
665
666 Given an iterable of nodes, emits objects conforming to the
666 Given an iterable of nodes, emits objects conforming to the
667 ``irevisiondelta`` interface that describe revisions in storage.
667 ``irevisiondelta`` interface that describe revisions in storage.
668
668
669 This method is a generator.
669 This method is a generator.
670
670
671 The input nodes may be unordered. Implementations must ensure that a
671 The input nodes may be unordered. Implementations must ensure that a
672 node's parents are emitted before the node itself. Transitively, this
672 node's parents are emitted before the node itself. Transitively, this
673 means that a node may only be emitted once all its ancestors in
673 means that a node may only be emitted once all its ancestors in
674 ``nodes`` have also been emitted.
674 ``nodes`` have also been emitted.
675
675
676 By default, emits "index" data (the ``node``, ``p1node``, and
676 By default, emits "index" data (the ``node``, ``p1node``, and
677 ``p2node`` attributes). If ``revisiondata`` is set, revision data
677 ``p2node`` attributes). If ``revisiondata`` is set, revision data
678 will also be present on the emitted objects.
678 will also be present on the emitted objects.
679
679
680 With default argument values, implementations can choose to emit
680 With default argument values, implementations can choose to emit
681 either fulltext revision data or a delta. When emitting deltas,
681 either fulltext revision data or a delta. When emitting deltas,
682 implementations must consider whether the delta's base revision
682 implementations must consider whether the delta's base revision
683 fulltext is available to the receiver.
683 fulltext is available to the receiver.
684
684
685 The base revision fulltext is guaranteed to be available if any of
685 The base revision fulltext is guaranteed to be available if any of
686 the following are met:
686 the following are met:
687
687
688 * Its fulltext revision was emitted by this method call.
688 * Its fulltext revision was emitted by this method call.
689 * A delta for that revision was emitted by this method call.
689 * A delta for that revision was emitted by this method call.
690 * ``assumehaveparentrevisions`` is True and the base revision is a
690 * ``assumehaveparentrevisions`` is True and the base revision is a
691 parent of the node.
691 parent of the node.
692
692
693 ``nodesorder`` can be used to control the order that revisions are
693 ``nodesorder`` can be used to control the order that revisions are
694 emitted. By default, revisions can be reordered as long as they are
694 emitted. By default, revisions can be reordered as long as they are
695 in DAG topological order (see above). If the value is ``nodes``,
695 in DAG topological order (see above). If the value is ``nodes``,
696 the iteration order from ``nodes`` should be used. If the value is
696 the iteration order from ``nodes`` should be used. If the value is
697 ``storage``, then the native order from the backing storage layer
697 ``storage``, then the native order from the backing storage layer
698 is used. (Not all storage layers will have strong ordering and behavior
698 is used. (Not all storage layers will have strong ordering and behavior
699 of this mode is storage-dependent.) ``nodes`` ordering can force
699 of this mode is storage-dependent.) ``nodes`` ordering can force
700 revisions to be emitted before their ancestors, so consumers should
700 revisions to be emitted before their ancestors, so consumers should
701 use it with care.
701 use it with care.
702
702
703 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
703 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
704 be set and it is the caller's responsibility to resolve it, if needed.
704 be set and it is the caller's responsibility to resolve it, if needed.
705
705
706 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
706 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
707 all revision data should be emitted as deltas against the revision
707 all revision data should be emitted as deltas against the revision
708 emitted just prior. The initial revision should be a delta against its
708 emitted just prior. The initial revision should be a delta against its
709 1st parent.
709 1st parent.
710 """
710 """
711
711
712
712
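
# A hedged consumer sketch for emitrevisions(). With the default ordering,
# parents precede children, so a single pass can sanity-check that every
# delta base was either emitted earlier in this call or is the null node
# (i.e. the fulltext itself was emitted). ``store`` is any ifiledata
# implementation; the check assumes the default argument values above.
_NULL = b'\0' * 20

def collect(store, nodes):
    seen = {_NULL}
    out = []
    for rd in store.emitrevisions(nodes, revisiondata=True):
        assert rd.basenode in seen, b'delta base must be recoverable'
        seen.add(rd.node)
        out.append((rd.node, rd.basenode, rd.delta or rd.revision))
    return out
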
713 class ifilemutation(interfaceutil.Interface):
713 class ifilemutation(interfaceutil.Interface):
714 """Storage interface for mutation events of a tracked file."""
714 """Storage interface for mutation events of a tracked file."""
715
715
716 def add(filedata, meta, transaction, linkrev, p1, p2):
716 def add(filedata, meta, transaction, linkrev, p1, p2):
717 """Add a new revision to the store.
717 """Add a new revision to the store.
718
718
719 Takes file data, dictionary of metadata, a transaction, linkrev,
719 Takes file data, dictionary of metadata, a transaction, linkrev,
720 and parent nodes.
720 and parent nodes.
721
721
722 Returns the node that was added.
722 Returns the node that was added.
723
723
724 May no-op if a revision matching the supplied data is already stored.
724 May no-op if a revision matching the supplied data is already stored.
725 """
725 """
726
726
727 def addrevision(
727 def addrevision(
728 revisiondata,
728 revisiondata,
729 transaction,
729 transaction,
730 linkrev,
730 linkrev,
731 p1,
731 p1,
732 p2,
732 p2,
733 node=None,
733 node=None,
734 flags=0,
734 flags=0,
735 cachedelta=None,
735 cachedelta=None,
736 ):
736 ):
737 """Add a new revision to the store.
737 """Add a new revision to the store.
738
738
739 This is similar to ``add()`` except it operates at a lower level.
739 This is similar to ``add()`` except it operates at a lower level.
740
740
741 The data passed in already contains a metadata header, if any.
741 The data passed in already contains a metadata header, if any.
742
742
743 ``node`` and ``flags`` can be used to define the expected node and
743 ``node`` and ``flags`` can be used to define the expected node and
744 the flags to use with storage. ``flags`` is a bitwise value composed
744 the flags to use with storage. ``flags`` is a bitwise value composed
745 of the various ``REVISION_FLAG_*`` constants.
745 of the various ``REVISION_FLAG_*`` constants.
746
746
747 ``add()`` is usually called when adding files from e.g. the working
747 ``add()`` is usually called when adding files from e.g. the working
748 directory. ``addrevision()`` is often called by ``add()`` and for
748 directory. ``addrevision()`` is often called by ``add()`` and for
749 scenarios where revision data has already been computed, such as when
749 scenarios where revision data has already been computed, such as when
750 applying raw data from a peer repo.
750 applying raw data from a peer repo.
751 """
751 """
752
752
753 def addgroup(
753 def addgroup(
754 deltas,
754 deltas,
755 linkmapper,
755 linkmapper,
756 transaction,
756 transaction,
757 addrevisioncb=None,
757 addrevisioncb=None,
758 duplicaterevisioncb=None,
758 duplicaterevisioncb=None,
759 maybemissingparents=False,
759 maybemissingparents=False,
760 ):
760 ):
761 """Process a series of deltas for storage.
761 """Process a series of deltas for storage.
762
762
763 ``deltas`` is an iterable of 7-tuples of
763 ``deltas`` is an iterable of 7-tuples of
764 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
764 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
765 to add.
765 to add.
766
766
767 The ``delta`` field contains ``mpatch`` data to apply to a base
767 The ``delta`` field contains ``mpatch`` data to apply to a base
768 revision, identified by ``deltabase``. The base node can be
768 revision, identified by ``deltabase``. The base node can be
769 ``nullid``, in which case the header from the delta can be ignored
769 ``nullid``, in which case the header from the delta can be ignored
770 and the delta used as the fulltext.
770 and the delta used as the fulltext.
771
771
772 ``addrevisioncb`` should be called for each node as it is committed.
772 ``addrevisioncb`` should be called for each node as it is committed.
773
773
774 ``maybemissingparents`` is a bool indicating whether the incoming
774 ``maybemissingparents`` is a bool indicating whether the incoming
775 data may reference parents/ancestor revisions that aren't present.
775 data may reference parents/ancestor revisions that aren't present.
776 This flag is set when receiving data into a "shallow" store that
776 This flag is set when receiving data into a "shallow" store that
777 doesn't hold all history.
777 doesn't hold all history.
778
778
779 Returns a list of nodes that were processed. A node will be in the list
779 Returns a list of nodes that were processed. A node will be in the list
780 even if it existed in the store previously.
780 even if it existed in the store previously.
781 """
781 """
782
782
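
# A hedged sketch of feeding addgroup(); the 7-tuples mirror what the
# iterrevisions() generators in changegroup.py (earlier in this diff)
# yield. ``store``, ``tr`` and ``linkrevs`` (a node -> linkrev mapping)
# are assumed to exist; each fulltext is sent as a full-insert delta
# against the null base.
import struct

def apply_fulltexts(store, tr, linkrevs, fulltexts):
    nullid = b'\0' * 20

    def deltas():
        for node, p1, p2, revision in fulltexts:
            delta = struct.pack(b'>lll', 0, 0, len(revision)) + revision
            #      (node, p1, p2, linknode, deltabase, delta, flags)
            yield (node, p1, p2, node, nullid, delta, 0)

    return store.addgroup(deltas(), linkrevs.__getitem__, tr)
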
783 def censorrevision(tr, node, tombstone=b''):
783 def censorrevision(tr, node, tombstone=b''):
784 """Remove the content of a single revision.
784 """Remove the content of a single revision.
785
785
786 The specified ``node`` will have its content purged from storage.
786 The specified ``node`` will have its content purged from storage.
787 Future attempts to access the revision data for this node will
787 Future attempts to access the revision data for this node will
788 result in failure.
788 result in failure.
789
789
790 A ``tombstone`` message can optionally be stored. This message may be
790 A ``tombstone`` message can optionally be stored. This message may be
791 displayed to users when they attempt to access the missing revision
791 displayed to users when they attempt to access the missing revision
792 data.
792 data.
793
793
794 Storage backends may have stored deltas against the previous content
794 Storage backends may have stored deltas against the previous content
795 in this revision. As part of censoring a revision, these storage
795 in this revision. As part of censoring a revision, these storage
796 backends are expected to rewrite any internally stored deltas such
796 backends are expected to rewrite any internally stored deltas such
797 that they no longer reference the deleted content.
797 that they no longer reference the deleted content.
798 """
798 """
799
799
800 def getstrippoint(minlink):
800 def getstrippoint(minlink):
801 """Find the minimum revision that must be stripped to strip a linkrev.
801 """Find the minimum revision that must be stripped to strip a linkrev.
802
802
803 Returns a 2-tuple containing the minimum revision number and a set
803 Returns a 2-tuple containing the minimum revision number and a set
804 of all revisions numbers that would be broken by this strip.
804 of all revisions numbers that would be broken by this strip.
805
805
806 TODO this is highly revlog centric and should be abstracted into
806 TODO this is highly revlog centric and should be abstracted into
807 a higher-level deletion API. ``repair.strip()`` relies on this.
807 a higher-level deletion API. ``repair.strip()`` relies on this.
808 """
808 """
809
809
810 def strip(minlink, transaction):
810 def strip(minlink, transaction):
811 """Remove storage of items starting at a linkrev.
811 """Remove storage of items starting at a linkrev.
812
812
813 This uses ``getstrippoint()`` to determine the first node to remove.
813 This uses ``getstrippoint()`` to determine the first node to remove.
814 Then it effectively truncates storage for all revisions after that.
814 Then it effectively truncates storage for all revisions after that.
815
815
816 TODO this is highly revlog centric and should be abstracted into a
816 TODO this is highly revlog centric and should be abstracted into a
817 higher-level deletion API.
817 higher-level deletion API.
818 """
818 """
819
819
820
820
821 class ifilestorage(ifileindex, ifiledata, ifilemutation):
821 class ifilestorage(ifileindex, ifiledata, ifilemutation):
822 """Complete storage interface for a single tracked file."""
822 """Complete storage interface for a single tracked file."""
823
823
824 def files():
824 def files():
825 """Obtain paths that are backing storage for this file.
825 """Obtain paths that are backing storage for this file.
826
826
827 TODO this is used heavily by verify code and there should probably
827 TODO this is used heavily by verify code and there should probably
828 be a better API for that.
828 be a better API for that.
829 """
829 """
830
830
831 def storageinfo(
831 def storageinfo(
832 exclusivefiles=False,
832 exclusivefiles=False,
833 sharedfiles=False,
833 sharedfiles=False,
834 revisionscount=False,
834 revisionscount=False,
835 trackedsize=False,
835 trackedsize=False,
836 storedsize=False,
836 storedsize=False,
837 ):
837 ):
838 """Obtain information about storage for this file's data.
838 """Obtain information about storage for this file's data.
839
839
840 Returns a dict describing storage for this tracked path. The keys
840 Returns a dict describing storage for this tracked path. The keys
841 in the dict map to arguments of the same. The arguments are bools
841 in the dict map to arguments of the same. The arguments are bools
842 indicating whether to calculate and obtain that data.
842 indicating whether to calculate and obtain that data.
843
843
844 exclusivefiles
844 exclusivefiles
845 Iterable of (vfs, path) describing files that are exclusively
845 Iterable of (vfs, path) describing files that are exclusively
846 used to back storage for this tracked path.
846 used to back storage for this tracked path.
847
847
848 sharedfiles
848 sharedfiles
849 Iterable of (vfs, path) describing files that are used to back
849 Iterable of (vfs, path) describing files that are used to back
850 storage for this tracked path. Those files may also provide storage
850 storage for this tracked path. Those files may also provide storage
851 for other stored entities.
851 for other stored entities.
852
852
853 revisionscount
853 revisionscount
854 Number of revisions available for retrieval.
854 Number of revisions available for retrieval.
855
855
856 trackedsize
856 trackedsize
857 Total size in bytes of all tracked revisions. This is a sum of the
857 Total size in bytes of all tracked revisions. This is a sum of the
858 length of the fulltext of all revisions.
858 length of the fulltext of all revisions.
859
859
860 storedsize
860 storedsize
861 Total size in bytes used to store data for all tracked revisions.
861 Total size in bytes used to store data for all tracked revisions.
862 This is commonly less than ``trackedsize`` due to internal usage
862 This is commonly less than ``trackedsize`` due to internal usage
863 of deltas rather than fulltext revisions.
863 of deltas rather than fulltext revisions.
864
864
865 Not all storage backends may support all queries are have a reasonable
865 Not all storage backends may support all queries are have a reasonable
866 value to use. In that case, the value should be set to ``None`` and
866 value to use. In that case, the value should be set to ``None`` and
867 callers are expected to handle this special value.
867 callers are expected to handle this special value.
868 """
868 """
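
    # A hedged usage sketch: request only the fields needed and treat ``None``
    # as "the backend cannot answer", per the docstring above. ``fl`` is any
    # object conforming to this interface.
    #
    #     info = fl.storageinfo(revisionscount=True, trackedsize=True)
    #     if info[b'revisionscount'] is not None:
    #         report(info[b'revisionscount'], info[b'trackedsize'])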

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


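# A hedged sketch (not part of the interfaces) of driving
# ``verifyintegrity()``: a shared ``state`` dict carries ``skipread`` and
# ``safe_renamed`` between storage primitives, and each yielded problem is
# assumed to expose ``warning`` and ``error`` attributes per
# ``iverifyproblem``.
def _sketch_verify_file(fl, ui):
    state = {b'skipread': set(), b'safe_renamed': set()}
    for problem in fl.verifyintegrity(state):
        if problem.warning:
            ui.warn(problem.warning + b'\n')
        if problem.error:
            ui.warn(problem.error + b'\n')
    return state[b'skipread']

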
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""


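# A pure-Python sketch of the ``idirs`` contract above, assuming b'/' as the
# path separator: directories are reference counted so that ``delpath()``
# only drops a directory when its last path is removed.
class _sketchdirs(object):
    def __init__(self):
        self._dirs = {}

    @staticmethod
    def _ancestors(path):
        # Yield b'a/b' then b'a' for b'a/b/c'.
        pos = path.rfind(b'/')
        while pos != -1:
            yield path[:pos]
            pos = path.rfind(b'/', 0, pos)

    def addpath(self, path):
        for d in self._ancestors(path):
            self._dirs[d] = self._dirs.get(d, 0) + 1

    def delpath(self, path):
        for d in self._ancestors(path):
            if self._dirs[d] == 1:
                del self._dirs[d]
            else:
                self._dirs[d] -= 1

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

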
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """
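
    # A hedged consumption sketch for ``diff()``: walk the returned dict and
    # classify each path. The use of ``None`` to mark a side on which the
    # path is absent is an assumption about the concrete implementation.
    #
    #     for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
    #         if n1 is None:
    #             handle_only_in_other(path)
    #         elif n2 is None:
    #             handle_only_in_self(path)
    #         else:
    #             handle_modified(path)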

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """


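# A small hedged usage sketch for the mutation methods of ``imanifestdict``:
# work on a ``copy()``, assign the node with ``__setitem__`` (existing flags
# are preserved), then set flags explicitly with ``setflag()``.
def _sketch_update_manifest_entry(m, path, newnode, execbit=False):
    m2 = m.copy()
    m2[path] = newnode
    m2.setflag(path, b'x' if execbit else b'')
    return m2

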
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """


class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """


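# A hedged sketch of committing a manifest change through the interfaces
# above, assuming ``read()`` on the writable copy exposes its backing
# ``imanifestdict`` so in-place mutation is visible to ``write()``.
def _sketch_copy_and_write(mctx, tr, linkrev, p1node, p2node, path, filenode):
    writable = mctx.copy()
    writable.read()[path] = filenode
    return writable.write(tr, linkrev, p1node, p2node, [path], [])

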
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """
    )

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance."""
    )

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """
    )

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """
    )

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """
    )

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """
    )

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """
    )

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """
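
    # A hedged sketch of the resolution order ``lookup()`` implies: 20-byte
    # values as binary nodes, 40-byte values as hex nodes, and anything
    # parseable as an integer as a revision number.
    #
    #     if len(value) == 20:
    #         return value
    #     if len(value) == 40:
    #         return bin(value)  # i.e. binascii.unhexlify
    #     return self.node(int(value))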

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def rawdata(node, _df=None):
        """Obtain raw data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
    ):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
    ):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.rawdata(node))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(
        m, transaction, link, p1, p2, added, removed, readtree=None, match=None
    ):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """
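
    # A hedged call sketch mirroring the docstring above: ``m`` is an
    # ``imanifestdict``, ``link`` the linkrev, ``p1``/``p2`` parent revision
    # numbers, and ``added``/``removed`` plain iterables of paths.
    #
    #     newnode = mstore.add(
    #         m, tr, link, p1, p2, added, removed, readtree=None, match=None
    #     )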

    def storageinfo(
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """


class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def update_caches(transaction):
        """Update whatever caches are relevant for the storage in use."""


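# A hedged navigation sketch for ``imanifestlog``: the root manifest comes
# from indexing (equivalent to ``get(b'', node)``), while tree manifests are
# addressed by directory path and require treemanifests.
def _sketch_manifest_for_dir(mlog, rootnode, tree=b''):
    if not tree:
        return mlog[rootnode]
    return mlog.get(tree, rootnode)

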
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """


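# A hedged end-to-end sketch tying ``file()`` to ``ifilestorage``: resolve a
# tracked path to its filelog and read one revision's fulltext.
def _sketch_read_file_revision(repo, path, filenode):
    fl = repo.file(path)
    return fl.revision(filenode)

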
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """
    )

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening."""
    )

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses."""
    )

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features are more run-time capabilities of the repository
        and more granular capabilities (which may be derived from requirements).
        """
    )

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo."""
    )

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory."""
    )

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """
    )

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """
    )

    root = interfaceutil.Attribute(
        """Path to the root of the working directory."""
    )

    path = interfaceutil.Attribute("""Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo."""
    )

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """
    )

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """
    )

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor."""
    )

    ui = interfaceutil.Attribute("""Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from."""
    )

    store = interfaceutil.Attribute("""A store instance.""")

    spath = interfaceutil.Attribute("""Path to the store.""")

    sjoin = interfaceutil.Attribute("""Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """
    )

    wcachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory dedicated to the working
        copy.

        Typically .hg/wcache.
        """
    )

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered."""
    )

    names = interfaceutil.Attribute("""A ``namespaces`` instance.""")

    filecopiesmode = interfaceutil.Attribute(
        """The way file copies should be dealt with in this repo."""
    )

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute("""A store of obsolescence data.""")

    changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """
    )

    dirstate = interfaceutil.Attribute("""Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec."""
    )

    def narrowmatch(match=None, includeexact=False):
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """
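
    # A hedged usage sketch: both methods evaluate a revset, ``revs()``
    # yielding revision numbers and ``set()`` changectx instances.
    #
    #     for rev in repo.revs(b'head() and draft()'):
    #         ...
    #     for ctx in repo.set(b'head() and draft()'):
    #         node = ctx.node()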

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def register_changeset(rev, changelogrevision):
        """Extension point for caches for new nodes.

        Multiple consumers are expected to need parts of the
        changelogrevision, so it is provided as an optimization to avoid
        duplicate lookups. A simple cache would be fragile when other
        revisions are accessed, too."""
        pass

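    # A hedged sketch of how an extension might use this hook: wrap it (e.g.
    # via extensions.wrapfunction on the repository class) and warm a private
    # cache from the already-parsed ``changelogrevision`` instead of
    # re-reading the changelog later. ``_mycache`` is hypothetical.
    #
    #     def wrapped(orig, repo, rev, changelogrevision):
    #         _mycache[rev] = changelogrevision.description
    #         return orig(repo, rev, changelogrevision)
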
    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

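    # A hedged sketch of the locking and transaction discipline these methods
    # imply: take the store lock first, then open a transaction; both objects
    # are assumed to support the context manager protocol here.
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-change') as tr:
    #             writesomething(tr)
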
    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, match, status, fail):
        pass

    def commit(
        text=b'',
        user=None,
        date=None,
        match=None,
        force=False,
        editor=False,
        extra=None,
    ):
        """Add a new revision to the repository."""

1758 def commitctx(ctx, error=False, origctx=None):
1766 def commitctx(ctx, error=False, origctx=None):
1759 """Commit a commitctx instance to the repository."""
1767 """Commit a commitctx instance to the repository."""
1760
1768
1761 def destroying():
1769 def destroying():
1762 """Inform the repository that nodes are about to be destroyed."""
1770 """Inform the repository that nodes are about to be destroyed."""
1763
1771
1764 def destroyed():
1772 def destroyed():
1765 """Inform the repository that nodes have been destroyed."""
1773 """Inform the repository that nodes have been destroyed."""
1766
1774
1767 def status(
1775 def status(
1768 node1=b'.',
1776 node1=b'.',
1769 node2=None,
1777 node2=None,
1770 match=None,
1778 match=None,
1771 ignored=False,
1779 ignored=False,
1772 clean=False,
1780 clean=False,
1773 unknown=False,
1781 unknown=False,
1774 listsubrepos=False,
1782 listsubrepos=False,
1775 ):
1783 ):
1776 """Convenience method to call repo[x].status()."""
1784 """Convenience method to call repo[x].status()."""
1777
1785
1778 def addpostdsstatus(ps):
1786 def addpostdsstatus(ps):
1779 pass
1787 pass
1780
1788
1781 def postdsstatus():
1789 def postdsstatus():
1782 pass
1790 pass
1783
1791
1784 def clearpostdsstatus():
1792 def clearpostdsstatus():
1785 pass
1793 pass
1786
1794
1787 def heads(start=None):
1795 def heads(start=None):
1788 """Obtain list of nodes that are DAG heads."""
1796 """Obtain list of nodes that are DAG heads."""
1789
1797
1790 def branchheads(branch=None, start=None, closed=False):
1798 def branchheads(branch=None, start=None, closed=False):
1791 pass
1799 pass
1792
1800
1793 def branches(nodes):
1801 def branches(nodes):
1794 pass
1802 pass
1795
1803
1796 def between(pairs):
1804 def between(pairs):
1797 pass
1805 pass
1798
1806
1799 def checkpush(pushop):
1807 def checkpush(pushop):
1800 pass
1808 pass
1801
1809
1802 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1810 prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""")
1803
1811
1804 def pushkey(namespace, key, old, new):
1812 def pushkey(namespace, key, old, new):
1805 """Set ``key`` to ``new`` (given ``old``) in ``namespace``; return success."""
1813 """Set ``key`` to ``new`` (given ``old``) in ``namespace``; return success."""
1806
1814
1807 def listkeys(namespace):
1815 def listkeys(namespace):
1808 """Return a dict of all pushkey values in ``namespace``."""
1816 """Return a dict of all pushkey values in ``namespace``."""
1809
1817
1810 def debugwireargs(one, two, three=None, four=None, five=None):
1818 def debugwireargs(one, two, three=None, four=None, five=None):
1811 pass
1819 pass
1812
1820
1813 def savecommitmessage(text):
1821 def savecommitmessage(text):
1814 """Persist ``text`` as the last commit message and return its path."""
1822 """Persist ``text`` as the last commit message and return its path."""
1815
1823
1816
1824
1817 class completelocalrepository(
1825 class completelocalrepository(
1818 ilocalrepositorymain, ilocalrepositoryfilestorage
1826 ilocalrepositorymain, ilocalrepositoryfilestorage
1819 ):
1827 ):
1820 """Complete interface for a local repository."""
1828 """Complete interface for a local repository."""
1821
1829
1822
1830
1823 class iwireprotocolcommandcacher(interfaceutil.Interface):
1831 class iwireprotocolcommandcacher(interfaceutil.Interface):
1824 """Represents a caching backend for wire protocol commands.
1832 """Represents a caching backend for wire protocol commands.
1825
1833
1826 Wire protocol version 2 supports transparent caching of many commands.
1834 Wire protocol version 2 supports transparent caching of many commands.
1827 To leverage this caching, servers can activate objects that cache
1835 To leverage this caching, servers can activate objects that cache
1828 command responses. Objects handle both cache writing and reading.
1836 command responses. Objects handle both cache writing and reading.
1829 This interface defines how that response caching mechanism works.
1837 This interface defines how that response caching mechanism works.
1830
1838
1831 Wire protocol version 2 commands emit a series of objects that are
1839 Wire protocol version 2 commands emit a series of objects that are
1832 serialized and sent to the client. The caching layer exists between
1840 serialized and sent to the client. The caching layer exists between
1833 the invocation of the command function and the sending of its output
1841 the invocation of the command function and the sending of its output
1834 objects to an output layer.
1842 objects to an output layer.
1835
1843
1836 Instances of this interface represent a binding to a cache that
1844 Instances of this interface represent a binding to a cache that
1837 can serve a response (in place of calling a command function) and/or
1845 can serve a response (in place of calling a command function) and/or
1838 write responses to a cache for subsequent use.
1846 write responses to a cache for subsequent use.
1839
1847
1840 When a command request arrives, the following happens with regard
1848 When a command request arrives, the following happens with regard
1841 to this interface:
1849 to this interface:
1842
1850
1843 1. The server determines whether the command request is cacheable.
1851 1. The server determines whether the command request is cacheable.
1844 2. If it is, an instance of this interface is spawned.
1852 2. If it is, an instance of this interface is spawned.
1845 3. The cacher is activated in a context manager (``__enter__`` is called).
1853 3. The cacher is activated in a context manager (``__enter__`` is called).
1846 4. A cache *key* for that request is derived. This will call the
1854 4. A cache *key* for that request is derived. This will call the
1847 instance's ``adjustcachekeystate()`` method so the derivation
1855 instance's ``adjustcachekeystate()`` method so the derivation
1848 can be influenced.
1856 can be influenced.
1849 5. The cacher is informed of the derived cache key via a call to
1857 5. The cacher is informed of the derived cache key via a call to
1850 ``setcachekey()``.
1858 ``setcachekey()``.
1851 6. The cacher's ``lookup()`` method is called to test for presence of
1859 6. The cacher's ``lookup()`` method is called to test for presence of
1852 the derived key in the cache.
1860 the derived key in the cache.
1853 7. If ``lookup()`` returns a hit, that cached result is used in place
1861 7. If ``lookup()`` returns a hit, that cached result is used in place
1854 of invoking the command function. ``__exit__`` is called and the instance
1862 of invoking the command function. ``__exit__`` is called and the instance
1855 is discarded.
1863 is discarded.
1856 8. The command function is invoked.
1864 8. The command function is invoked.
1857 9. ``onobject()`` is called for each object emitted by the command
1865 9. ``onobject()`` is called for each object emitted by the command
1858 function.
1866 function.
1859 10. After the final object is seen, ``onfinished()`` is called.
1867 10. After the final object is seen, ``onfinished()`` is called.
1860 11. ``__exit__`` is called to signal the end of use of the instance.
1868 11. ``__exit__`` is called to signal the end of use of the instance.
1861
1869
1862 Cache *key* derivation can be influenced by the instance.
1870 Cache *key* derivation can be influenced by the instance.
1863
1871
1864 Cache keys are initially derived by a deterministic representation of
1872 Cache keys are initially derived by a deterministic representation of
1865 the command request. This includes the command name, arguments, protocol
1873 the command request. This includes the command name, arguments, protocol
1866 version, etc. This initial key derivation is performed by CBOR-encoding a
1874 version, etc. This initial key derivation is performed by CBOR-encoding a
1867 data structure and feeding that output into a hasher.
1875 data structure and feeding that output into a hasher.
1868
1876
1869 Instances of this interface can influence this initial key derivation
1877 Instances of this interface can influence this initial key derivation
1870 via ``adjustcachekeystate()``.
1878 via ``adjustcachekeystate()``.
1871
1879
1872 The instance is informed of the derived cache key via a call to
1880 The instance is informed of the derived cache key via a call to
1873 ``setcachekey()``. The instance must store the key locally so it can
1881 ``setcachekey()``. The instance must store the key locally so it can
1874 be consulted on subsequent operations that may require it.
1882 be consulted on subsequent operations that may require it.
1875
1883
1876 When constructed, the instance has access to a callable that can be used
1884 When constructed, the instance has access to a callable that can be used
1877 for encoding response objects. This callable receives as its single
1885 for encoding response objects. This callable receives as its single
1878 argument an object emitted by a command function. It returns an iterable
1886 argument an object emitted by a command function. It returns an iterable
1879 of bytes chunks representing the encoded object. Unless the cacher is
1887 of bytes chunks representing the encoded object. Unless the cacher is
1880 caching native Python objects in memory or has a way of reconstructing
1888 caching native Python objects in memory or has a way of reconstructing
1881 the original Python objects, implementations typically call this function
1889 the original Python objects, implementations typically call this function
1882 to produce bytes from the output objects and then store those bytes in
1890 to produce bytes from the output objects and then store those bytes in
1883 the cache. When it comes time to re-emit those bytes, they are wrapped
1891 the cache. When it comes time to re-emit those bytes, they are wrapped
1884 in a ``wireprototypes.encodedresponse`` instance to tell the output
1892 in a ``wireprototypes.encodedresponse`` instance to tell the output
1885 layer that they are pre-encoded.
1893 layer that they are pre-encoded.
1886
1894
1887 When receiving the objects emitted by the command function, instances
1895 When receiving the objects emitted by the command function, instances
1888 can choose what to do with those objects. The simplest thing to do is
1896 can choose what to do with those objects. The simplest thing to do is
1889 re-emit the original objects. They will be forwarded to the output
1897 re-emit the original objects. They will be forwarded to the output
1890 layer and will be processed as if the cacher did not exist.
1898 layer and will be processed as if the cacher did not exist.
1891
1899
1892 Implementations could also choose to not emit objects - instead locally
1900 Implementations could also choose to not emit objects - instead locally
1893 buffering objects or their encoded representation. They could then emit
1901 buffering objects or their encoded representation. They could then emit
1894 a single "coalesced" object when ``onfinished()`` is called. In
1902 a single "coalesced" object when ``onfinished()`` is called. In
1895 this way, the implementation would function as a filtering layer of
1903 this way, the implementation would function as a filtering layer of
1896 sorts.
1904 sorts.
1897
1905
1898 When caching objects, typically the encoded form of the object will
1906 When caching objects, typically the encoded form of the object will
1899 be stored. Keep in mind that if the original object is forwarded to
1907 be stored. Keep in mind that if the original object is forwarded to
1900 the output layer, it will need to be encoded there as well. For large
1908 the output layer, it will need to be encoded there as well. For large
1901 output, this redundant encoding could add overhead. Implementations
1909 output, this redundant encoding could add overhead. Implementations
1902 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1910 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1903 instances to avoid this overhead.
1911 instances to avoid this overhead.
1904 """
1912 """
1905
1913
1906 def __enter__():
1914 def __enter__():
1907 """Marks the instance as active.
1915 """Marks the instance as active.
1908
1916
1909 Should return self.
1917 Should return self.
1910 """
1918 """
1911
1919
1912 def __exit__(exctype, excvalue, exctb):
1920 def __exit__(exctype, excvalue, exctb):
1913 """Called when cacher is no longer used.
1921 """Called when cacher is no longer used.
1914
1922
1915 This can be used by implementations to perform cleanup actions (e.g.
1923 This can be used by implementations to perform cleanup actions (e.g.
1916 disconnecting network sockets, aborting a partially cached response).
1924 disconnecting network sockets, aborting a partially cached response).
1917 """
1925 """
1918
1926
1919 def adjustcachekeystate(state):
1927 def adjustcachekeystate(state):
1920 """Influences cache key derivation by adjusting state to derive key.
1928 """Influences cache key derivation by adjusting state to derive key.
1921
1929
1922 A dict defining the state used to derive the cache key is passed.
1930 A dict defining the state used to derive the cache key is passed.
1923
1931
1924 Implementations can modify this dict to record additional state that
1932 Implementations can modify this dict to record additional state that
1925 should influence key derivation.
1933 should influence key derivation.
1926
1934
1927 Implementations are *highly* encouraged not to modify or delete
1935 Implementations are *highly* encouraged not to modify or delete
1928 existing keys.
1936 existing keys.
1929 """
1937 """
1930
1938
1931 def setcachekey(key):
1939 def setcachekey(key):
1932 """Record the derived cache key for this request.
1940 """Record the derived cache key for this request.
1933
1941
1934 Instances may mutate the key for internal usage as desired; e.g.,
1942 Instances may mutate the key for internal usage as desired; e.g.,
1935 instances may wish to prepend the repo name, introduce path
1943 instances may wish to prepend the repo name, introduce path
1936 components for filesystem or URL addressing, etc. Behavior is up to
1944 components for filesystem or URL addressing, etc. Behavior is up to
1937 the cache.
1945 the cache.
1938
1946
1939 Returns a bool indicating if the request is cacheable by this
1947 Returns a bool indicating if the request is cacheable by this
1940 instance.
1948 instance.
1941 """
1949 """
1942
1950
1943 def lookup():
1951 def lookup():
1944 """Attempt to resolve an entry in the cache.
1952 """Attempt to resolve an entry in the cache.
1945
1953
1946 The instance is instructed to look for the cache key that it was
1954 The instance is instructed to look for the cache key that it was
1947 informed about via the call to ``setcachekey()``.
1955 informed about via the call to ``setcachekey()``.
1948
1956
1949 If there's no cache hit or the cacher doesn't wish to use the cached
1957 If there's no cache hit or the cacher doesn't wish to use the cached
1950 entry, ``None`` should be returned.
1958 entry, ``None`` should be returned.
1951
1959
1952 Else, a dict defining the cached result should be returned. The
1960 Else, a dict defining the cached result should be returned. The
1953 dict may have the following keys:
1961 dict may have the following keys:
1954
1962
1955 objs
1963 objs
1956 An iterable of objects that should be sent to the client. That
1964 An iterable of objects that should be sent to the client. That
1957 iterable of objects is expected to be what the command function
1965 iterable of objects is expected to be what the command function
1958 would return if invoked or an equivalent representation thereof.
1966 would return if invoked or an equivalent representation thereof.
1959 """
1967 """
1960
1968
1961 def onobject(obj):
1969 def onobject(obj):
1962 """Called when a new object is emitted from the command function.
1970 """Called when a new object is emitted from the command function.
1963
1971
1964 Receives as its argument the object that was emitted from the
1972 Receives as its argument the object that was emitted from the
1965 command function.
1973 command function.
1966
1974
1967 This method returns an iterator of objects to forward to the output
1975 This method returns an iterator of objects to forward to the output
1968 layer. The easiest implementation is a generator that just
1976 layer. The easiest implementation is a generator that just
1969 ``yield obj``.
1977 ``yield obj``.
1970 """
1978 """
1971
1979
1972 def onfinished():
1980 def onfinished():
1973 """Called after all objects have been emitted from the command function.
1981 """Called after all objects have been emitted from the command function.
1974
1982
1975 Implementations should return an iterator of objects to forward to
1983 Implementations should return an iterator of objects to forward to
1976 the output layer.
1984 the output layer.
1977
1985
1978 This method can be a generator.
1986 This method can be a generator.
1979 """
1987 """
@@ -1,3692 +1,3695 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 pushkey,
57 pushkey,
58 pycompat,
58 pycompat,
59 rcutil,
59 rcutil,
60 repoview,
60 repoview,
61 requirements as requirementsmod,
61 requirements as requirementsmod,
62 revlog,
62 revlog,
63 revset,
63 revset,
64 revsetlang,
64 revsetlang,
65 scmutil,
65 scmutil,
66 sparse,
66 sparse,
67 store as storemod,
67 store as storemod,
68 subrepoutil,
68 subrepoutil,
69 tags as tagsmod,
69 tags as tagsmod,
70 transaction,
70 transaction,
71 txnutil,
71 txnutil,
72 util,
72 util,
73 vfs as vfsmod,
73 vfs as vfsmod,
74 )
74 )
75
75
76 from .interfaces import (
76 from .interfaces import (
77 repository,
77 repository,
78 util as interfaceutil,
78 util as interfaceutil,
79 )
79 )
80
80
81 from .utils import (
81 from .utils import (
82 hashutil,
82 hashutil,
83 procutil,
83 procutil,
84 stringutil,
84 stringutil,
85 )
85 )
86
86
87 from .revlogutils import constants as revlogconst
87 from .revlogutils import constants as revlogconst
88
88
89 release = lockmod.release
89 release = lockmod.release
90 urlerr = util.urlerr
90 urlerr = util.urlerr
91 urlreq = util.urlreq
91 urlreq = util.urlreq
92
92
93 # set of (path, vfs-location) tuples. vfs-location is:
93 # set of (path, vfs-location) tuples. vfs-location is:
94 # - 'plain' for vfs relative paths
94 # - 'plain' for vfs relative paths
95 # - '' for svfs relative paths
95 # - '' for svfs relative paths
96 _cachedfiles = set()
96 _cachedfiles = set()
97
97
98
98
99 class _basefilecache(scmutil.filecache):
99 class _basefilecache(scmutil.filecache):
100 """All filecache usage on repo are done for logic that should be unfiltered"""
100 """All filecache usage on repo are done for logic that should be unfiltered"""
101
101
102 def __get__(self, repo, type=None):
102 def __get__(self, repo, type=None):
103 if repo is None:
103 if repo is None:
104 return self
104 return self
105 # proxy to unfiltered __dict__ since filtered repo has no entry
105 # proxy to unfiltered __dict__ since filtered repo has no entry
106 unfi = repo.unfiltered()
106 unfi = repo.unfiltered()
107 try:
107 try:
108 return unfi.__dict__[self.sname]
108 return unfi.__dict__[self.sname]
109 except KeyError:
109 except KeyError:
110 pass
110 pass
111 return super(_basefilecache, self).__get__(unfi, type)
111 return super(_basefilecache, self).__get__(unfi, type)
112
112
113 def set(self, repo, value):
113 def set(self, repo, value):
114 return super(_basefilecache, self).set(repo.unfiltered(), value)
114 return super(_basefilecache, self).set(repo.unfiltered(), value)
115
115
116
116
117 class repofilecache(_basefilecache):
117 class repofilecache(_basefilecache):
118 """filecache for files in .hg but outside of .hg/store"""
118 """filecache for files in .hg but outside of .hg/store"""
119
119
120 def __init__(self, *paths):
120 def __init__(self, *paths):
121 super(repofilecache, self).__init__(*paths)
121 super(repofilecache, self).__init__(*paths)
122 for path in paths:
122 for path in paths:
123 _cachedfiles.add((path, b'plain'))
123 _cachedfiles.add((path, b'plain'))
124
124
125 def join(self, obj, fname):
125 def join(self, obj, fname):
126 return obj.vfs.join(fname)
126 return obj.vfs.join(fname)
127
127
128
128
129 class storecache(_basefilecache):
129 class storecache(_basefilecache):
130 """filecache for files in the store"""
130 """filecache for files in the store"""
131
131
132 def __init__(self, *paths):
132 def __init__(self, *paths):
133 super(storecache, self).__init__(*paths)
133 super(storecache, self).__init__(*paths)
134 for path in paths:
134 for path in paths:
135 _cachedfiles.add((path, b''))
135 _cachedfiles.add((path, b''))
136
136
137 def join(self, obj, fname):
137 def join(self, obj, fname):
138 return obj.sjoin(fname)
138 return obj.sjoin(fname)
139
139
140
140
141 class mixedrepostorecache(_basefilecache):
141 class mixedrepostorecache(_basefilecache):
142 """filecache for a mix files in .hg/store and outside"""
142 """filecache for a mix files in .hg/store and outside"""
143
143
144 def __init__(self, *pathsandlocations):
144 def __init__(self, *pathsandlocations):
145 # scmutil.filecache only uses the path for passing back into our
145 # scmutil.filecache only uses the path for passing back into our
146 # join(), so we can safely pass a list of paths and locations
146 # join(), so we can safely pass a list of paths and locations
147 super(mixedrepostorecache, self).__init__(*pathsandlocations)
147 super(mixedrepostorecache, self).__init__(*pathsandlocations)
148 _cachedfiles.update(pathsandlocations)
148 _cachedfiles.update(pathsandlocations)
149
149
150 def join(self, obj, fnameandlocation):
150 def join(self, obj, fnameandlocation):
151 fname, location = fnameandlocation
151 fname, location = fnameandlocation
152 if location == b'plain':
152 if location == b'plain':
153 return obj.vfs.join(fname)
153 return obj.vfs.join(fname)
154 else:
154 else:
155 if location != b'':
155 if location != b'':
156 raise error.ProgrammingError(
156 raise error.ProgrammingError(
157 b'unexpected location: %s' % location
157 b'unexpected location: %s' % location
158 )
158 )
159 return obj.sjoin(fname)
159 return obj.sjoin(fname)
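# An illustrative sketch of how the three decorators above are declared on
# a repository-like class; the class, property names, and bodies are
# hypothetical and exist only to show the pattern:


class _filecacheexample(object):
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        # recomputed only when .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)

    @storecache(b'00changelog.i')
    def _changelogdata(self):
        # recomputed only when the store's changelog index changes
        return None

    @mixedrepostorecache((b'bookmarks', b'plain'), (b'00changelog.i', b''))
    def _mixed(self):
        # depends on one .hg/ file and one .hg/store/ file
        return None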
160
160
161
161
162 def isfilecached(repo, name):
162 def isfilecached(repo, name):
163 """check if a repo has already cached "name" filecache-ed property
163 """check if a repo has already cached "name" filecache-ed property
164
164
165 This returns (cachedobj-or-None, iscached) tuple.
165 This returns (cachedobj-or-None, iscached) tuple.
166 """
166 """
167 cacheentry = repo.unfiltered()._filecache.get(name, None)
167 cacheentry = repo.unfiltered()._filecache.get(name, None)
168 if not cacheentry:
168 if not cacheentry:
169 return None, False
169 return None, False
170 return cacheentry.obj, True
170 return cacheentry.obj, True
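# Example usage (the repo object is assumed):
#
#     obj, cached = isfilecached(repo, b'_bookmarks')
#     if cached:
#         pass  # reuse obj without re-reading the backing file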
171
171
172
172
173 class unfilteredpropertycache(util.propertycache):
173 class unfilteredpropertycache(util.propertycache):
174 """propertycache that apply to unfiltered repo only"""
174 """propertycache that apply to unfiltered repo only"""
175
175
176 def __get__(self, repo, type=None):
176 def __get__(self, repo, type=None):
177 unfi = repo.unfiltered()
177 unfi = repo.unfiltered()
178 if unfi is repo:
178 if unfi is repo:
179 return super(unfilteredpropertycache, self).__get__(unfi)
179 return super(unfilteredpropertycache, self).__get__(unfi)
180 return getattr(unfi, self.name)
180 return getattr(unfi, self.name)
181
181
182
182
183 class filteredpropertycache(util.propertycache):
183 class filteredpropertycache(util.propertycache):
184 """propertycache that must take filtering in account"""
184 """propertycache that must take filtering in account"""
185
185
186 def cachevalue(self, obj, value):
186 def cachevalue(self, obj, value):
187 object.__setattr__(obj, self.name, value)
187 object.__setattr__(obj, self.name, value)
188
188
189
189
190 def hasunfilteredcache(repo, name):
190 def hasunfilteredcache(repo, name):
191 """check if a repo has an unfilteredpropertycache value for <name>"""
191 """check if a repo has an unfilteredpropertycache value for <name>"""
192 return name in vars(repo.unfiltered())
192 return name in vars(repo.unfiltered())
193
193
194
194
195 def unfilteredmethod(orig):
195 def unfilteredmethod(orig):
196 """decorate method that always need to be run on unfiltered version"""
196 """decorate method that always need to be run on unfiltered version"""
197
197
198 @functools.wraps(orig)
198 @functools.wraps(orig)
199 def wrapper(repo, *args, **kwargs):
199 def wrapper(repo, *args, **kwargs):
200 return orig(repo.unfiltered(), *args, **kwargs)
200 return orig(repo.unfiltered(), *args, **kwargs)
201
201
202 return wrapper
202 return wrapper
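# Usage sketch (the decorated method is illustrative):
#
#     class examplerepo(object):
#         @unfilteredmethod
#         def destroyed(self):
#             pass  # ``self`` is always the unfiltered repo here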
203
203
204
204
205 moderncaps = {
205 moderncaps = {
206 b'lookup',
206 b'lookup',
207 b'branchmap',
207 b'branchmap',
208 b'pushkey',
208 b'pushkey',
209 b'known',
209 b'known',
210 b'getbundle',
210 b'getbundle',
211 b'unbundle',
211 b'unbundle',
212 }
212 }
213 legacycaps = moderncaps.union({b'changegroupsubset'})
213 legacycaps = moderncaps.union({b'changegroupsubset'})
214
214
215
215
216 @interfaceutil.implementer(repository.ipeercommandexecutor)
216 @interfaceutil.implementer(repository.ipeercommandexecutor)
217 class localcommandexecutor(object):
217 class localcommandexecutor(object):
218 def __init__(self, peer):
218 def __init__(self, peer):
219 self._peer = peer
219 self._peer = peer
220 self._sent = False
220 self._sent = False
221 self._closed = False
221 self._closed = False
222
222
223 def __enter__(self):
223 def __enter__(self):
224 return self
224 return self
225
225
226 def __exit__(self, exctype, excvalue, exctb):
226 def __exit__(self, exctype, excvalue, exctb):
227 self.close()
227 self.close()
228
228
229 def callcommand(self, command, args):
229 def callcommand(self, command, args):
230 if self._sent:
230 if self._sent:
231 raise error.ProgrammingError(
231 raise error.ProgrammingError(
232 b'callcommand() cannot be used after sendcommands()'
232 b'callcommand() cannot be used after sendcommands()'
233 )
233 )
234
234
235 if self._closed:
235 if self._closed:
236 raise error.ProgrammingError(
236 raise error.ProgrammingError(
237 b'callcommand() cannot be used after close()'
237 b'callcommand() cannot be used after close()'
238 )
238 )
239
239
240 # We don't need to support anything fancy. Just call the named
240 # We don't need to support anything fancy. Just call the named
241 # method on the peer and return a resolved future.
241 # method on the peer and return a resolved future.
242 fn = getattr(self._peer, pycompat.sysstr(command))
242 fn = getattr(self._peer, pycompat.sysstr(command))
243
243
244 f = pycompat.futures.Future()
244 f = pycompat.futures.Future()
245
245
246 try:
246 try:
247 result = fn(**pycompat.strkwargs(args))
247 result = fn(**pycompat.strkwargs(args))
248 except Exception:
248 except Exception:
249 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
249 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
250 else:
250 else:
251 f.set_result(result)
251 f.set_result(result)
252
252
253 return f
253 return f
254
254
255 def sendcommands(self):
255 def sendcommands(self):
256 self._sent = True
256 self._sent = True
257
257
258 def close(self):
258 def close(self):
259 self._closed = True
259 self._closed = True
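# For reference, a sketch of driving this executor through the peer API;
# peer construction is elided and the command name follows the wire
# protocol:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         node = f.result()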
260
260
261
261
262 @interfaceutil.implementer(repository.ipeercommands)
262 @interfaceutil.implementer(repository.ipeercommands)
263 class localpeer(repository.peer):
263 class localpeer(repository.peer):
264 '''peer for a local repo; reflects only the most recent API'''
264 '''peer for a local repo; reflects only the most recent API'''
265
265
266 def __init__(self, repo, caps=None):
266 def __init__(self, repo, caps=None):
267 super(localpeer, self).__init__()
267 super(localpeer, self).__init__()
268
268
269 if caps is None:
269 if caps is None:
270 caps = moderncaps.copy()
270 caps = moderncaps.copy()
271 self._repo = repo.filtered(b'served')
271 self._repo = repo.filtered(b'served')
272 self.ui = repo.ui
272 self.ui = repo.ui
273 self._caps = repo._restrictcapabilities(caps)
273 self._caps = repo._restrictcapabilities(caps)
274
274
275 # Begin of _basepeer interface.
275 # Begin of _basepeer interface.
276
276
277 def url(self):
277 def url(self):
278 return self._repo.url()
278 return self._repo.url()
279
279
280 def local(self):
280 def local(self):
281 return self._repo
281 return self._repo
282
282
283 def peer(self):
283 def peer(self):
284 return self
284 return self
285
285
286 def canpush(self):
286 def canpush(self):
287 return True
287 return True
288
288
289 def close(self):
289 def close(self):
290 self._repo.close()
290 self._repo.close()
291
291
292 # End of _basepeer interface.
292 # End of _basepeer interface.
293
293
294 # Begin of _basewirecommands interface.
294 # Begin of _basewirecommands interface.
295
295
296 def branchmap(self):
296 def branchmap(self):
297 return self._repo.branchmap()
297 return self._repo.branchmap()
298
298
299 def capabilities(self):
299 def capabilities(self):
300 return self._caps
300 return self._caps
301
301
302 def clonebundles(self):
302 def clonebundles(self):
303 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
303 return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)
304
304
305 def debugwireargs(self, one, two, three=None, four=None, five=None):
305 def debugwireargs(self, one, two, three=None, four=None, five=None):
306 """Used to test argument passing over the wire"""
306 """Used to test argument passing over the wire"""
307 return b"%s %s %s %s %s" % (
307 return b"%s %s %s %s %s" % (
308 one,
308 one,
309 two,
309 two,
310 pycompat.bytestr(three),
310 pycompat.bytestr(three),
311 pycompat.bytestr(four),
311 pycompat.bytestr(four),
312 pycompat.bytestr(five),
312 pycompat.bytestr(five),
313 )
313 )
314
314
315 def getbundle(
315 def getbundle(
316 self, source, heads=None, common=None, bundlecaps=None, **kwargs
316 self, source, heads=None, common=None, bundlecaps=None, **kwargs
317 ):
317 ):
318 chunks = exchange.getbundlechunks(
318 chunks = exchange.getbundlechunks(
319 self._repo,
319 self._repo,
320 source,
320 source,
321 heads=heads,
321 heads=heads,
322 common=common,
322 common=common,
323 bundlecaps=bundlecaps,
323 bundlecaps=bundlecaps,
324 **kwargs
324 **kwargs
325 )[1]
325 )[1]
326 cb = util.chunkbuffer(chunks)
326 cb = util.chunkbuffer(chunks)
327
327
328 if exchange.bundle2requested(bundlecaps):
328 if exchange.bundle2requested(bundlecaps):
329 # When requesting a bundle2, getbundle returns a stream to make the
329 # When requesting a bundle2, getbundle returns a stream to make the
330 # wire level function happier. We need to build a proper object
330 # wire level function happier. We need to build a proper object
331 # from it in local peer.
331 # from it in local peer.
332 return bundle2.getunbundler(self.ui, cb)
332 return bundle2.getunbundler(self.ui, cb)
333 else:
333 else:
334 return changegroup.getunbundler(b'01', cb, None)
334 return changegroup.getunbundler(b'01', cb, None)
335
335
336 def heads(self):
336 def heads(self):
337 return self._repo.heads()
337 return self._repo.heads()
338
338
339 def known(self, nodes):
339 def known(self, nodes):
340 return self._repo.known(nodes)
340 return self._repo.known(nodes)
341
341
342 def listkeys(self, namespace):
342 def listkeys(self, namespace):
343 return self._repo.listkeys(namespace)
343 return self._repo.listkeys(namespace)
344
344
345 def lookup(self, key):
345 def lookup(self, key):
346 return self._repo.lookup(key)
346 return self._repo.lookup(key)
347
347
348 def pushkey(self, namespace, key, old, new):
348 def pushkey(self, namespace, key, old, new):
349 return self._repo.pushkey(namespace, key, old, new)
349 return self._repo.pushkey(namespace, key, old, new)
350
350
351 def stream_out(self):
351 def stream_out(self):
352 raise error.Abort(_(b'cannot perform stream clone against local peer'))
352 raise error.Abort(_(b'cannot perform stream clone against local peer'))
353
353
354 def unbundle(self, bundle, heads, url):
354 def unbundle(self, bundle, heads, url):
355 """apply a bundle on a repo
355 """apply a bundle on a repo
356
356
357 This function handles the repo locking itself."""
357 This function handles the repo locking itself."""
358 try:
358 try:
359 try:
359 try:
360 bundle = exchange.readbundle(self.ui, bundle, None)
360 bundle = exchange.readbundle(self.ui, bundle, None)
361 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
361 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
362 if util.safehasattr(ret, b'getchunks'):
362 if util.safehasattr(ret, b'getchunks'):
363 # This is a bundle20 object, turn it into an unbundler.
363 # This is a bundle20 object, turn it into an unbundler.
364 # This little dance should be dropped eventually when the
364 # This little dance should be dropped eventually when the
365 # API is finally improved.
365 # API is finally improved.
366 stream = util.chunkbuffer(ret.getchunks())
366 stream = util.chunkbuffer(ret.getchunks())
367 ret = bundle2.getunbundler(self.ui, stream)
367 ret = bundle2.getunbundler(self.ui, stream)
368 return ret
368 return ret
369 except Exception as exc:
369 except Exception as exc:
370 # If the exception contains output salvaged from a bundle2
370 # If the exception contains output salvaged from a bundle2
371 # reply, we need to make sure it is printed before continuing
371 # reply, we need to make sure it is printed before continuing
372 # to fail. So we build a bundle2 with such output and consume
372 # to fail. So we build a bundle2 with such output and consume
373 # it directly.
373 # it directly.
374 #
374 #
375 # This is not very elegant but allows a "simple" solution for
375 # This is not very elegant but allows a "simple" solution for
376 # issue4594
376 # issue4594
377 output = getattr(exc, '_bundle2salvagedoutput', ())
377 output = getattr(exc, '_bundle2salvagedoutput', ())
378 if output:
378 if output:
379 bundler = bundle2.bundle20(self._repo.ui)
379 bundler = bundle2.bundle20(self._repo.ui)
380 for out in output:
380 for out in output:
381 bundler.addpart(out)
381 bundler.addpart(out)
382 stream = util.chunkbuffer(bundler.getchunks())
382 stream = util.chunkbuffer(bundler.getchunks())
383 b = bundle2.getunbundler(self.ui, stream)
383 b = bundle2.getunbundler(self.ui, stream)
384 bundle2.processbundle(self._repo, b)
384 bundle2.processbundle(self._repo, b)
385 raise
385 raise
386 except error.PushRaced as exc:
386 except error.PushRaced as exc:
387 raise error.ResponseError(
387 raise error.ResponseError(
388 _(b'push failed:'), stringutil.forcebytestr(exc)
388 _(b'push failed:'), stringutil.forcebytestr(exc)
389 )
389 )
390
390
391 # End of _basewirecommands interface.
391 # End of _basewirecommands interface.
392
392
393 # Begin of peer interface.
393 # Begin of peer interface.
394
394
395 def commandexecutor(self):
395 def commandexecutor(self):
396 return localcommandexecutor(self)
396 return localcommandexecutor(self)
397
397
398 # End of peer interface.
398 # End of peer interface.
399
399
400
400
401 @interfaceutil.implementer(repository.ipeerlegacycommands)
401 @interfaceutil.implementer(repository.ipeerlegacycommands)
402 class locallegacypeer(localpeer):
402 class locallegacypeer(localpeer):
403 """peer extension which implements legacy methods too; used for tests with
403 """peer extension which implements legacy methods too; used for tests with
404 restricted capabilities"""
404 restricted capabilities"""
405
405
406 def __init__(self, repo):
406 def __init__(self, repo):
407 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
407 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
408
408
409 # Begin of baselegacywirecommands interface.
409 # Begin of baselegacywirecommands interface.
410
410
411 def between(self, pairs):
411 def between(self, pairs):
412 return self._repo.between(pairs)
412 return self._repo.between(pairs)
413
413
414 def branches(self, nodes):
414 def branches(self, nodes):
415 return self._repo.branches(nodes)
415 return self._repo.branches(nodes)
416
416
417 def changegroup(self, nodes, source):
417 def changegroup(self, nodes, source):
418 outgoing = discovery.outgoing(
418 outgoing = discovery.outgoing(
419 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
419 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
420 )
420 )
421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422
422
423 def changegroupsubset(self, bases, heads, source):
423 def changegroupsubset(self, bases, heads, source):
424 outgoing = discovery.outgoing(
424 outgoing = discovery.outgoing(
425 self._repo, missingroots=bases, ancestorsof=heads
425 self._repo, missingroots=bases, ancestorsof=heads
426 )
426 )
427 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
427 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
428
428
429 # End of baselegacywirecommands interface.
429 # End of baselegacywirecommands interface.
430
430
431
431
432 # Functions receiving (ui, features) that extensions can register to impact
432 # Functions receiving (ui, features) that extensions can register to impact
433 # the ability to load repositories with custom requirements. Only
433 # the ability to load repositories with custom requirements. Only
434 # functions defined in loaded extensions are called.
434 # functions defined in loaded extensions are called.
435 #
435 #
436 # The function receives a set of requirement strings that the repository
436 # The function receives a set of requirement strings that the repository
437 # is capable of opening. Functions will typically add elements to the
437 # is capable of opening. Functions will typically add elements to the
438 # set to reflect that the extension knows how to handle those requirements.
438 # set to reflect that the extension knows how to handle those requirements.
439 featuresetupfuncs = set()
439 featuresetupfuncs = set()
440
440
441
441
442 def _getsharedvfs(hgvfs, requirements):
442 def _getsharedvfs(hgvfs, requirements):
443 """returns the vfs object pointing to root of shared source
443 """returns the vfs object pointing to root of shared source
444 repo for a shared repository
444 repo for a shared repository
445
445
446 hgvfs is vfs pointing at .hg/ of current repo (shared one)
446 hgvfs is vfs pointing at .hg/ of current repo (shared one)
447 requirements is a set of requirements of current repo (shared one)
447 requirements is a set of requirements of current repo (shared one)
448 """
448 """
449 # The ``shared`` or ``relshared`` requirements indicate the
449 # The ``shared`` or ``relshared`` requirements indicate the
450 # store lives in the path contained in the ``.hg/sharedpath`` file.
450 # store lives in the path contained in the ``.hg/sharedpath`` file.
451 # This is an absolute path for ``shared`` and relative to
451 # This is an absolute path for ``shared`` and relative to
452 # ``.hg/`` for ``relshared``.
452 # ``.hg/`` for ``relshared``.
453 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
453 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
454 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
454 if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
455 sharedpath = hgvfs.join(sharedpath)
455 sharedpath = hgvfs.join(sharedpath)
456
456
457 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
457 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
458
458
459 if not sharedvfs.exists():
459 if not sharedvfs.exists():
460 raise error.RepoError(
460 raise error.RepoError(
461 _(b'.hg/sharedpath points to nonexistent directory %s')
461 _(b'.hg/sharedpath points to nonexistent directory %s')
462 % sharedvfs.base
462 % sharedvfs.base
463 )
463 )
464 return sharedvfs
464 return sharedvfs
465
465
466
466
467 def _readrequires(vfs, allowmissing):
467 def _readrequires(vfs, allowmissing):
468 """reads the require file present at root of this vfs
468 """reads the require file present at root of this vfs
469 and return a set of requirements
469 and return a set of requirements
470
470
471 If allowmissing is True, we suppress ENOENT if raised"""
471 If allowmissing is True, we suppress ENOENT if raised"""
472 # requires file contains a newline-delimited list of
472 # requires file contains a newline-delimited list of
473 # features/capabilities the opener (us) must have in order to use
473 # features/capabilities the opener (us) must have in order to use
474 # the repository. This file was introduced in Mercurial 0.9.2,
474 # the repository. This file was introduced in Mercurial 0.9.2,
475 # which means very old repositories may not have one. We assume
475 # which means very old repositories may not have one. We assume
476 # a missing file translates to no requirements.
476 # a missing file translates to no requirements.
477 try:
477 try:
478 requirements = set(vfs.read(b'requires').splitlines())
478 requirements = set(vfs.read(b'requires').splitlines())
479 except IOError as e:
479 except IOError as e:
480 if not (allowmissing and e.errno == errno.ENOENT):
480 if not (allowmissing and e.errno == errno.ENOENT):
481 raise
481 raise
482 requirements = set()
482 requirements = set()
483 return requirements
483 return requirements
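# For instance, tolerating pre-0.9.2 repositories that lack the file
# (the vfs object is assumed):
#
#     reqs = _readrequires(hgvfs, allowmissing=True)
#     if requirementsmod.SHARED_REQUIREMENT in reqs:
#         pass  # resolve the shared store before proceeding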
484
484
485
485
486 def makelocalrepository(baseui, path, intents=None):
486 def makelocalrepository(baseui, path, intents=None):
487 """Create a local repository object.
487 """Create a local repository object.
488
488
489 Given arguments needed to construct a local repository, this function
489 Given arguments needed to construct a local repository, this function
490 performs various early repository loading functionality (such as
490 performs various early repository loading functionality (such as
491 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
491 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
492 the repository can be opened, derives a type suitable for representing
492 the repository can be opened, derives a type suitable for representing
493 that repository, and returns an instance of it.
493 that repository, and returns an instance of it.
494
494
495 The returned object conforms to the ``repository.completelocalrepository``
495 The returned object conforms to the ``repository.completelocalrepository``
496 interface.
496 interface.
497
497
498 The repository type is derived by calling a series of factory functions
498 The repository type is derived by calling a series of factory functions
499 for each aspect/interface of the final repository. These are defined by
499 for each aspect/interface of the final repository. These are defined by
500 ``REPO_INTERFACES``.
500 ``REPO_INTERFACES``.
501
501
502 Each factory function is called to produce a type implementing a specific
502 Each factory function is called to produce a type implementing a specific
503 interface. The cumulative list of returned types will be combined into a
503 interface. The cumulative list of returned types will be combined into a
504 new type and that type will be instantiated to represent the local
504 new type and that type will be instantiated to represent the local
505 repository.
505 repository.
506
506
507 The factory functions each receive various state that may be consulted
507 The factory functions each receive various state that may be consulted
508 as part of deriving a type.
508 as part of deriving a type.
509
509
510 Extensions should wrap these factory functions to customize repository type
510 Extensions should wrap these factory functions to customize repository type
511 creation. Note that an extension's wrapped function may be called even if
511 creation. Note that an extension's wrapped function may be called even if
512 that extension is not loaded for the repo being constructed. Extensions
512 that extension is not loaded for the repo being constructed. Extensions
513 should check if their ``__name__`` appears in the
513 should check if their ``__name__`` appears in the
514 ``extensionmodulenames`` set passed to the factory function and no-op if
514 ``extensionmodulenames`` set passed to the factory function and no-op if
515 not.
515 not.
516 """
516 """
517 ui = baseui.copy()
517 ui = baseui.copy()
518 # Prevent copying repo configuration.
518 # Prevent copying repo configuration.
519 ui.copy = baseui.copy
519 ui.copy = baseui.copy
520
520
521 # Working directory VFS rooted at repository root.
521 # Working directory VFS rooted at repository root.
522 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
522 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
523
523
524 # Main VFS for .hg/ directory.
524 # Main VFS for .hg/ directory.
525 hgpath = wdirvfs.join(b'.hg')
525 hgpath = wdirvfs.join(b'.hg')
526 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
526 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
527 # Whether this repository is a shared one or not
527 # Whether this repository is a shared one or not
528 shared = False
528 shared = False
529 # If this repository is shared, vfs pointing to shared repo
529 # If this repository is shared, vfs pointing to shared repo
530 sharedvfs = None
530 sharedvfs = None
531
531
532 # The .hg/ path should exist and should be a directory. All other
532 # The .hg/ path should exist and should be a directory. All other
533 # cases are errors.
533 # cases are errors.
534 if not hgvfs.isdir():
534 if not hgvfs.isdir():
535 try:
535 try:
536 hgvfs.stat()
536 hgvfs.stat()
537 except OSError as e:
537 except OSError as e:
538 if e.errno != errno.ENOENT:
538 if e.errno != errno.ENOENT:
539 raise
539 raise
540 except ValueError as e:
540 except ValueError as e:
541 # Can be raised on Python 3.8 when path is invalid.
541 # Can be raised on Python 3.8 when path is invalid.
542 raise error.Abort(
542 raise error.Abort(
543 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
543 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
544 )
544 )
545
545
546 raise error.RepoError(_(b'repository %s not found') % path)
546 raise error.RepoError(_(b'repository %s not found') % path)
547
547
548 requirements = _readrequires(hgvfs, True)
548 requirements = _readrequires(hgvfs, True)
549 shared = (
549 shared = (
550 requirementsmod.SHARED_REQUIREMENT in requirements
550 requirementsmod.SHARED_REQUIREMENT in requirements
551 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
551 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
552 )
552 )
553 storevfs = None
553 storevfs = None
554 if shared:
554 if shared:
555 # This is a shared repo
555 # This is a shared repo
556 sharedvfs = _getsharedvfs(hgvfs, requirements)
556 sharedvfs = _getsharedvfs(hgvfs, requirements)
557 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
557 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
558 else:
558 else:
559 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
559 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
560
560
561 # if .hg/requires contains the sharesafe requirement, it means
561 # if .hg/requires contains the sharesafe requirement, it means
562 # there exists a `.hg/store/requires` too and we should read it
562 # there exists a `.hg/store/requires` too and we should read it
563 # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
563 # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
564 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
564 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
565 # is not present; refer to checkrequirementscompat() for details
565 # is not present; refer to checkrequirementscompat() for details
566 #
566 #
567 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
567 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
568 # repository was shared the old way. We check the share source .hg/requires
568 # repository was shared the old way. We check the share source .hg/requires
569 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
569 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
570 # to be reshared
570 # to be reshared
571 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
571 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
572
572
573 if (
573 if (
574 shared
574 shared
575 and requirementsmod.SHARESAFE_REQUIREMENT
575 and requirementsmod.SHARESAFE_REQUIREMENT
576 not in _readrequires(sharedvfs, True)
576 not in _readrequires(sharedvfs, True)
577 ):
577 ):
578 mismatch_warn = ui.configbool(
578 mismatch_warn = ui.configbool(
579 b'share', b'safe-mismatch.source-not-safe.warn'
579 b'share', b'safe-mismatch.source-not-safe.warn'
580 )
580 )
581 mismatch_config = ui.config(
581 mismatch_config = ui.config(
582 b'share', b'safe-mismatch.source-not-safe'
582 b'share', b'safe-mismatch.source-not-safe'
583 )
583 )
584 if mismatch_config in (
584 if mismatch_config in (
585 b'downgrade-allow',
585 b'downgrade-allow',
586 b'allow',
586 b'allow',
587 b'downgrade-abort',
587 b'downgrade-abort',
588 ):
588 ):
589 # prevent cyclic import localrepo -> upgrade -> localrepo
589 # prevent cyclic import localrepo -> upgrade -> localrepo
590 from . import upgrade
590 from . import upgrade
591
591
592 upgrade.downgrade_share_to_non_safe(
592 upgrade.downgrade_share_to_non_safe(
593 ui,
593 ui,
594 hgvfs,
594 hgvfs,
595 sharedvfs,
595 sharedvfs,
596 requirements,
596 requirements,
597 mismatch_config,
597 mismatch_config,
598 mismatch_warn,
598 mismatch_warn,
599 )
599 )
600 elif mismatch_config == b'abort':
600 elif mismatch_config == b'abort':
601 raise error.Abort(
601 raise error.Abort(
602 _(
602 _(
603 b"share source does not support exp-sharesafe requirement"
603 b"share source does not support exp-sharesafe requirement"
604 )
604 )
605 )
605 )
606 else:
606 else:
607 hint = _(
607 hint = _(
608 "run `hg help config.share.safe-mismatch.source-not-safe`"
608 "run `hg help config.share.safe-mismatch.source-not-safe`"
609 )
609 )
610 raise error.Abort(
610 raise error.Abort(
611 _(
611 _(
612 b"share-safe mismatch with source.\nUnrecognized"
612 b"share-safe mismatch with source.\nUnrecognized"
613 b" value '%s' of `share.safe-mismatch.source-not-safe`"
613 b" value '%s' of `share.safe-mismatch.source-not-safe`"
614 b" set."
614 b" set."
615 )
615 )
616 % mismatch_config,
616 % mismatch_config,
617 hint=hint,
617 hint=hint,
618 )
618 )
619 else:
619 else:
620 requirements |= _readrequires(storevfs, False)
620 requirements |= _readrequires(storevfs, False)
621 elif shared:
621 elif shared:
622 sourcerequires = _readrequires(sharedvfs, False)
622 sourcerequires = _readrequires(sharedvfs, False)
623 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
623 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
624 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
624 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
625 mismatch_warn = ui.configbool(
625 mismatch_warn = ui.configbool(
626 b'share', b'safe-mismatch.source-safe.warn'
626 b'share', b'safe-mismatch.source-safe.warn'
627 )
627 )
628 if mismatch_config in (
628 if mismatch_config in (
629 b'upgrade-allow',
629 b'upgrade-allow',
630 b'allow',
630 b'allow',
631 b'upgrade-abort',
631 b'upgrade-abort',
632 ):
632 ):
633 # prevent cyclic import localrepo -> upgrade -> localrepo
633 # prevent cyclic import localrepo -> upgrade -> localrepo
634 from . import upgrade
634 from . import upgrade
635
635
636 upgrade.upgrade_share_to_safe(
636 upgrade.upgrade_share_to_safe(
637 ui,
637 ui,
638 hgvfs,
638 hgvfs,
639 storevfs,
639 storevfs,
640 requirements,
640 requirements,
641 mismatch_config,
641 mismatch_config,
642 mismatch_warn,
642 mismatch_warn,
643 )
643 )
644 elif mismatch_config == b'abort':
644 elif mismatch_config == b'abort':
645 raise error.Abort(
645 raise error.Abort(
646 _(
646 _(
647 b'version mismatch: source uses share-safe'
647 b'version mismatch: source uses share-safe'
648 b' functionality while the current share does not'
648 b' functionality while the current share does not'
649 )
649 )
650 )
650 )
651 else:
651 else:
652 hint = _("run `hg help config.share.safe-mismatch.source-safe`")
652 hint = _("run `hg help config.share.safe-mismatch.source-safe`")
653 raise error.Abort(
653 raise error.Abort(
654 _(
654 _(
655 b"share-safe mismatch with source.\nUnrecognized"
655 b"share-safe mismatch with source.\nUnrecognized"
656 b" value '%s' of `share.safe-mismatch.source-safe` set."
656 b" value '%s' of `share.safe-mismatch.source-safe` set."
657 )
657 )
658 % mismatch_config,
658 % mismatch_config,
659 hint=hint,
659 hint=hint,
660 )
660 )
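# For reference, the knobs consulted in the branches above live in an
# hgrc; the values here are illustrative:
#
#     [share]
#     safe-mismatch.source-safe = upgrade-allow
#     safe-mismatch.source-not-safe = abort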
661
661
    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`.
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one.
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


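# A minimal sketch of the monkeypatching described in the ``loadhgrc``
# docstring above, written as a hypothetical extension; the extension
# itself and the ``hgrc-extra`` filename are illustrative assumptions,
# not part of Mercurial:
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#         try:
#             # pull in one extra per-repo config file
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)

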
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


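# Extensions feed ``featuresetupfuncs`` (consumed above) from their setup
# hooks; a sketch under the assumption of a hypothetical ``exp-myfeature``
# requirement name:
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         # advertise an extra requirement this extension knows how to open
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)

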
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


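# ``makestore`` selects the store implementation purely from requirements;
# a usage sketch (``storepath`` is a hypothetical variable):
#
#     store = makestore(
#         {b'store', b'fncache', b'dotencode'},
#         storepath,
#         lambda base: vfsmod.vfs(base, cacheaudited=True),
#     )
#     # -> a storemod.fncachestore with dotencode handling enabled;
#     # without b'fncache' it would be a storemod.encodedstore, and
#     # without b'store' a storemod.basicstore.

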
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


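# The requirement-to-engine mapping above leans on the naming convention
# ``<prefix>-compression-<engine>``; a quick illustration of the split,
# runnable at a plain Python prompt:
#
#     >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
#     b'zstd'
#     >>> b'exp-compression-none'.split(b'-', 2)[2]
#     b'none'

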
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


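# A sketch of how this composition can be redirected by an extension: since
# ``REPO_INTERFACES`` holds lambdas resolving to the module-level factories,
# wrapping a factory swaps the class that ends up in ``bases``. The
# ``exp-git`` requirement and ``makegitfilestorage`` factory below are
# hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def wrapmakefilestorage(orig, requirements, features, **kwargs):
#         if b'exp-git' in requirements:
#             return makegitfilestorage(requirements, features, **kwargs)
#         return orig(requirements, features, **kwargs)
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'makefilestorage', wrapmakefilestorage
#         )

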
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

1392 def _getvfsward(self, origfunc):
1392 def _getvfsward(self, origfunc):
1393 """build a ward for self.vfs"""
1393 """build a ward for self.vfs"""
1394 rref = weakref.ref(self)
1394 rref = weakref.ref(self)
1395
1395
1396 def checkvfs(path, mode=None):
1396 def checkvfs(path, mode=None):
1397 ret = origfunc(path, mode=mode)
1397 ret = origfunc(path, mode=mode)
1398 repo = rref()
1398 repo = rref()
1399 if (
1399 if (
1400 repo is None
1400 repo is None
1401 or not util.safehasattr(repo, b'_wlockref')
1401 or not util.safehasattr(repo, b'_wlockref')
1402 or not util.safehasattr(repo, b'_lockref')
1402 or not util.safehasattr(repo, b'_lockref')
1403 ):
1403 ):
1404 return
1404 return
1405 if mode in (None, b'r', b'rb'):
1405 if mode in (None, b'r', b'rb'):
1406 return
1406 return
1407 if path.startswith(repo.path):
1407 if path.startswith(repo.path):
1408 # truncate name relative to the repository (.hg)
1408 # truncate name relative to the repository (.hg)
1409 path = path[len(repo.path) + 1 :]
1409 path = path[len(repo.path) + 1 :]
1410 if path.startswith(b'cache/'):
1410 if path.startswith(b'cache/'):
1411 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1411 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1412 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1412 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1413 # path prefixes covered by 'lock'
1413 # path prefixes covered by 'lock'
1414 vfs_path_prefixes = (
1414 vfs_path_prefixes = (
1415 b'journal.',
1415 b'journal.',
1416 b'undo.',
1416 b'undo.',
1417 b'strip-backup/',
1417 b'strip-backup/',
1418 b'cache/',
1418 b'cache/',
1419 )
1419 )
1420 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1420 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1421 if repo._currentlock(repo._lockref) is None:
1421 if repo._currentlock(repo._lockref) is None:
1422 repo.ui.develwarn(
1422 repo.ui.develwarn(
1423 b'write with no lock: "%s"' % path,
1423 b'write with no lock: "%s"' % path,
1424 stacklevel=3,
1424 stacklevel=3,
1425 config=b'check-locks',
1425 config=b'check-locks',
1426 )
1426 )
1427 elif repo._currentlock(repo._wlockref) is None:
1427 elif repo._currentlock(repo._wlockref) is None:
1428 # rest of vfs files are covered by 'wlock'
1428 # rest of vfs files are covered by 'wlock'
1429 #
1429 #
1430 # exclude special files
1430 # exclude special files
1431 for prefix in self._wlockfreeprefix:
1431 for prefix in self._wlockfreeprefix:
1432 if path.startswith(prefix):
1432 if path.startswith(prefix):
1433 return
1433 return
1434 repo.ui.develwarn(
1434 repo.ui.develwarn(
1435 b'write with no wlock: "%s"' % path,
1435 b'write with no wlock: "%s"' % path,
1436 stacklevel=3,
1436 stacklevel=3,
1437 config=b'check-locks',
1437 config=b'check-locks',
1438 )
1438 )
1439 return ret
1439 return ret
1440
1440
1441 return checkvfs
1441 return checkvfs
1442
1442
1443 def _getsvfsward(self, origfunc):
1443 def _getsvfsward(self, origfunc):
1444 """build a ward for self.svfs"""
1444 """build a ward for self.svfs"""
1445 rref = weakref.ref(self)
1445 rref = weakref.ref(self)
1446
1446
1447 def checksvfs(path, mode=None):
1447 def checksvfs(path, mode=None):
1448 ret = origfunc(path, mode=mode)
1448 ret = origfunc(path, mode=mode)
1449 repo = rref()
1449 repo = rref()
1450 if repo is None or not util.safehasattr(repo, b'_lockref'):
1450 if repo is None or not util.safehasattr(repo, b'_lockref'):
1451 return
1451 return
1452 if mode in (None, b'r', b'rb'):
1452 if mode in (None, b'r', b'rb'):
1453 return
1453 return
1454 if path.startswith(repo.sharedpath):
1454 if path.startswith(repo.sharedpath):
1455 # truncate name relative to the repository (.hg)
1455 # truncate name relative to the repository (.hg)
1456 path = path[len(repo.sharedpath) + 1 :]
1456 path = path[len(repo.sharedpath) + 1 :]
1457 if repo._currentlock(repo._lockref) is None:
1457 if repo._currentlock(repo._lockref) is None:
1458 repo.ui.develwarn(
1458 repo.ui.develwarn(
1459 b'write with no lock: "%s"' % path, stacklevel=4
1459 b'write with no lock: "%s"' % path, stacklevel=4
1460 )
1460 )
1461 return ret
1461 return ret
1462
1462
1463 return checksvfs
1463 return checksvfs
1464
1464
1465 def close(self):
1465 def close(self):
1466 self._writecaches()
1466 self._writecaches()
1467
1467
1468 def _writecaches(self):
1468 def _writecaches(self):
1469 if self._revbranchcache:
1469 if self._revbranchcache:
1470 self._revbranchcache.write()
1470 self._revbranchcache.write()
1471
1471
1472 def _restrictcapabilities(self, caps):
1472 def _restrictcapabilities(self, caps):
1473 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1473 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1474 caps = set(caps)
1474 caps = set(caps)
1475 capsblob = bundle2.encodecaps(
1475 capsblob = bundle2.encodecaps(
1476 bundle2.getrepocaps(self, role=b'client')
1476 bundle2.getrepocaps(self, role=b'client')
1477 )
1477 )
1478 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1478 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1479 return caps
1479 return caps
1480
1480
1481 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

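    # Illustrative walk for the method above (editor's sketch; the path is
    # hypothetical): for repo._checknested(repo.root + b'/sub/dir/f'), the
    # loop tries the prefixes b'sub/dir/f', b'sub/dir' and b'sub' against
    # ctx.substate, so only registered subrepo roots (or paths delegated to
    # them via sub.checknested) are accepted.
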
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

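    # Minimal usage sketch for the two view helpers above (editor's note,
    # not part of the original file):
    #
    #   served = repo.filtered(b'served')  # view without hidden changesets
    #   unfi = served.unfiltered()         # back to the raw repository
    #
    # Calling filtered() again on `served` still yields a single level of
    # repoview, never a view of a view.
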
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cachestat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

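    # Sketch of how narrowmatch() composes matchers (editor's note; the
    # matcher below is hypothetical):
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:docs/**'])
    #   nm = repo.narrowmatch(m)  # intersection of m and the narrowspec
    #   ne = repo.narrowmatch(m, includeexact=True)  # keeps exact files
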
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

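    # For reference (editor's note): depending on the active filter, the
    # mapping above recognises b'null', nullrev and nullid, and on views
    # that include the working copy also b'.', the dirstate parents and
    # their parents, each mapped to a (rev, node) pair.
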
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

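    # Usage sketch for revs() (editor's note; the revset is illustrative):
    #
    #   for rev in repo.revs(b'heads(%ld)', [0, 1, 2]):
    #       ...
    #
    # where %ld escapes a list of revision numbers per
    # revsetlang.formatspec.
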
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

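    # Hedged example for anyrevs() (editor's note; the alias definition is
    # hypothetical): resolve several user-supplied revsets at once, with a
    # local alias overriding any user alias of the same name:
    #
    #   repo.anyrevs(
    #       [b'mine()', b'draft()'],
    #       user=True,
    #       localalias={b'mine': b'user("alice")'},
    #   )
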
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

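    # Hedged example for hook() (editor's note; the hook name and keyword
    # argument are illustrative): an extension that registered a b'myhook'
    # hook could fire it with
    #
    #   repo.hook(b'myhook', throw=False, node=hex(somenode))
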
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        pass

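    # A minimal sketch (editor's note; hypothetical extension code, not the
    # upstream implementation) of how a derived repository class could use
    # the register_changeset callback to index changesets as they are added:
    #
    #   class indexingrepo(localrepository):
    #       def register_changeset(self, rev, changelogrevision):
    #           # changelogrevision exposes parsed fields such as
    #           # description, user and files
    #           myindex.add(rev, changelogrevision.description)
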
    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

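    # For context (editor's note): the patterns loaded above come from hgrc
    # sections of the same name. A hypothetical
    #
    #   [encode]
    #   **.txt = unix2dos
    #
    # would pipe .txt files through a registered b'unix2dos' data filter,
    # or through the shell command of that name when no data filter
    # matches.
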
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
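        # For example (editor's note; the node is hypothetical and
        # shortened), adding tag "v1.2" would record a line such as:
        #
        #     +A 9f3b6a... v1.2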
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
2389 repo = reporef()
2392 repo = reporef()
2390 if repo is None:
2393 if repo is None:
2391 # If the repo has been GC'd (and this release function is being
2394 # If the repo has been GC'd (and this release function is being
2392 # called from transaction.__del__), there's not much we can do,
2395 # called from transaction.__del__), there's not much we can do,
2393 # so just leave the unfinished transaction there and let the
2396 # so just leave the unfinished transaction there and let the
2394 # user run `hg recover`.
2397 # user run `hg recover`.
2395 return
2398 return
2396 if success:
2399 if success:
2397 # this should be explicitly invoked here, because
2400 # this should be explicitly invoked here, because
2398 # in-memory changes aren't written out when closing the
2401 # in-memory changes aren't written out when closing the
2399 # transaction if tr.addfilegenerator (via
2402 # transaction if tr.addfilegenerator (via
2400 # dirstate.write or so) wasn't invoked while the
2403 # dirstate.write or so) wasn't invoked while the
2401 # transaction was running
2404 # transaction was running
2402 repo.dirstate.write(None)
2405 repo.dirstate.write(None)
2403 else:
2406 else:
2404 # discard all changes (including ones already written
2407 # discard all changes (including ones already written
2405 # out) in this transaction
2408 # out) in this transaction
2406 narrowspec.restorebackup(self, b'journal.narrowspec')
2409 narrowspec.restorebackup(self, b'journal.narrowspec')
2407 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2410 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2408 repo.dirstate.restorebackup(None, b'journal.dirstate')
2411 repo.dirstate.restorebackup(None, b'journal.dirstate')
2409
2412
2410 repo.invalidate(clearfilecache=True)
2413 repo.invalidate(clearfilecache=True)
2411
2414
2412 tr = transaction.transaction(
2415 tr = transaction.transaction(
2413 rp,
2416 rp,
2414 self.svfs,
2417 self.svfs,
2415 vfsmap,
2418 vfsmap,
2416 b"journal",
2419 b"journal",
2417 b"undo",
2420 b"undo",
2418 aftertrans(renames),
2421 aftertrans(renames),
2419 self.store.createmode,
2422 self.store.createmode,
2420 validator=validate,
2423 validator=validate,
2421 releasefn=releasefn,
2424 releasefn=releasefn,
2422 checkambigfiles=_cachedfiles,
2425 checkambigfiles=_cachedfiles,
2423 name=desc,
2426 name=desc,
2424 )
2427 )
2425 tr.changes[b'origrepolen'] = len(self)
2428 tr.changes[b'origrepolen'] = len(self)
2426 tr.changes[b'obsmarkers'] = set()
2429 tr.changes[b'obsmarkers'] = set()
2427 tr.changes[b'phases'] = []
2430 tr.changes[b'phases'] = []
2428 tr.changes[b'bookmarks'] = {}
2431 tr.changes[b'bookmarks'] = {}
2429
2432
2430 tr.hookargs[b'txnid'] = txnid
2433 tr.hookargs[b'txnid'] = txnid
2431 tr.hookargs[b'txnname'] = desc
2434 tr.hookargs[b'txnname'] = desc
2432 tr.hookargs[b'changes'] = tr.changes
2435 tr.hookargs[b'changes'] = tr.changes
2433 # note: writing the fncache only during finalize means that the file is
2436 # note: writing the fncache only during finalize means that the file is
2434 # outdated when running hooks. As fncache is used for streaming clone,
2437 # outdated when running hooks. As fncache is used for streaming clone,
2435 # this is not expected to break anything that happens during the hooks.
2438 # this is not expected to break anything that happens during the hooks.
2436 tr.addfinalize(b'flush-fncache', self.store.write)
2439 tr.addfinalize(b'flush-fncache', self.store.write)
2437
2440
2438 def txnclosehook(tr2):
2441 def txnclosehook(tr2):
2439 """To be run if transaction is successful, will schedule a hook run"""
2442 """To be run if transaction is successful, will schedule a hook run"""
2440 # Don't reference tr2 in hook() so we don't hold a reference.
2443 # Don't reference tr2 in hook() so we don't hold a reference.
2441 # This reduces memory consumption when there are multiple
2444 # This reduces memory consumption when there are multiple
2442 # transactions per lock. This can likely go away if issue5045
2445 # transactions per lock. This can likely go away if issue5045
2443 # fixes the function accumulation.
2446 # fixes the function accumulation.
2444 hookargs = tr2.hookargs
2447 hookargs = tr2.hookargs
2445
2448
2446 def hookfunc(unused_success):
2449 def hookfunc(unused_success):
2447 repo = reporef()
2450 repo = reporef()
2448 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2451 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2449 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2452 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2450 for name, (old, new) in bmchanges:
2453 for name, (old, new) in bmchanges:
2451 args = tr.hookargs.copy()
2454 args = tr.hookargs.copy()
2452 args.update(bookmarks.preparehookargs(name, old, new))
2455 args.update(bookmarks.preparehookargs(name, old, new))
2453 repo.hook(
2456 repo.hook(
2454 b'txnclose-bookmark',
2457 b'txnclose-bookmark',
2455 throw=False,
2458 throw=False,
2456 **pycompat.strkwargs(args)
2459 **pycompat.strkwargs(args)
2457 )
2460 )
2458
2461
2459 if hook.hashook(repo.ui, b'txnclose-phase'):
2462 if hook.hashook(repo.ui, b'txnclose-phase'):
2460 cl = repo.unfiltered().changelog
2463 cl = repo.unfiltered().changelog
2461 phasemv = sorted(
2464 phasemv = sorted(
2462 tr.changes[b'phases'], key=lambda r: r[0][0]
2465 tr.changes[b'phases'], key=lambda r: r[0][0]
2463 )
2466 )
2464 for revs, (old, new) in phasemv:
2467 for revs, (old, new) in phasemv:
2465 for rev in revs:
2468 for rev in revs:
2466 args = tr.hookargs.copy()
2469 args = tr.hookargs.copy()
2467 node = hex(cl.node(rev))
2470 node = hex(cl.node(rev))
2468 args.update(phases.preparehookargs(node, old, new))
2471 args.update(phases.preparehookargs(node, old, new))
2469 repo.hook(
2472 repo.hook(
2470 b'txnclose-phase',
2473 b'txnclose-phase',
2471 throw=False,
2474 throw=False,
2472 **pycompat.strkwargs(args)
2475 **pycompat.strkwargs(args)
2473 )
2476 )
2474
2477
2475 repo.hook(
2478 repo.hook(
2476 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2479 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2477 )
2480 )
2478
2481
2479 reporef()._afterlock(hookfunc)
2482 reporef()._afterlock(hookfunc)
2480
2483
2481 tr.addfinalize(b'txnclose-hook', txnclosehook)
2484 tr.addfinalize(b'txnclose-hook', txnclosehook)
2482 # Include a leading "-" to make it happen before the transaction summary
2485 # Include a leading "-" to make it happen before the transaction summary
2483 # reports registered via scmutil.registersummarycallback() whose names
2486 # reports registered via scmutil.registersummarycallback() whose names
2484 # are 00-txnreport etc. That way, the caches will be warm when the
2487 # are 00-txnreport etc. That way, the caches will be warm when the
2485 # callbacks run.
2488 # callbacks run.
2486 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2489 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2487
2490
2488 def txnaborthook(tr2):
2491 def txnaborthook(tr2):
2489 """To be run if transaction is aborted"""
2492 """To be run if transaction is aborted"""
2490 reporef().hook(
2493 reporef().hook(
2491 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2494 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2492 )
2495 )
2493
2496
2494 tr.addabort(b'txnabort-hook', txnaborthook)
2497 tr.addabort(b'txnabort-hook', txnaborthook)
2495 # avoid eager cache invalidation. in-memory data should be identical
2498 # avoid eager cache invalidation. in-memory data should be identical
2496 # to stored data if the transaction has no error.
2499 # to stored data if the transaction has no error.
2497 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2500 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2498 self._transref = weakref.ref(tr)
2501 self._transref = weakref.ref(tr)
2499 scmutil.registersummarycallback(self, tr, desc)
2502 scmutil.registersummarycallback(self, tr, desc)
2500 return tr
2503 return tr
2501
2504
2502 def _journalfiles(self):
2505 def _journalfiles(self):
2503 return (
2506 return (
2504 (self.svfs, b'journal'),
2507 (self.svfs, b'journal'),
2505 (self.svfs, b'journal.narrowspec'),
2508 (self.svfs, b'journal.narrowspec'),
2506 (self.vfs, b'journal.narrowspec.dirstate'),
2509 (self.vfs, b'journal.narrowspec.dirstate'),
2507 (self.vfs, b'journal.dirstate'),
2510 (self.vfs, b'journal.dirstate'),
2508 (self.vfs, b'journal.branch'),
2511 (self.vfs, b'journal.branch'),
2509 (self.vfs, b'journal.desc'),
2512 (self.vfs, b'journal.desc'),
2510 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2513 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2511 (self.svfs, b'journal.phaseroots'),
2514 (self.svfs, b'journal.phaseroots'),
2512 )
2515 )
2513
2516
2514 def undofiles(self):
2517 def undofiles(self):
2515 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2518 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
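# Illustrative: undoname() maps each journal file to its undo counterpart,
# e.g. b'journal.dirstate' -> b'undo.dirstate'; these undo.* files are what
# _rollback() below reads back.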
2516
2519
2517 @unfilteredmethod
2520 @unfilteredmethod
2518 def _writejournal(self, desc):
2521 def _writejournal(self, desc):
2519 self.dirstate.savebackup(None, b'journal.dirstate')
2522 self.dirstate.savebackup(None, b'journal.dirstate')
2520 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2523 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2521 narrowspec.savebackup(self, b'journal.narrowspec')
2524 narrowspec.savebackup(self, b'journal.narrowspec')
2522 self.vfs.write(
2525 self.vfs.write(
2523 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2526 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2524 )
2527 )
2525 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2528 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
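# Illustrative: after this write, journal.desc holds the pre-transaction
# changelog length followed by the transaction name, e.g.::
#
#     1234
#     commit
#
# _rollback() later parses this layout (plus an optional detail line)
# back out of undo.desc.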
2526 bookmarksvfs = bookmarks.bookmarksvfs(self)
2529 bookmarksvfs = bookmarks.bookmarksvfs(self)
2527 bookmarksvfs.write(
2530 bookmarksvfs.write(
2528 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2531 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2529 )
2532 )
2530 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2533 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2531
2534
2532 def recover(self):
2535 def recover(self):
2533 with self.lock():
2536 with self.lock():
2534 if self.svfs.exists(b"journal"):
2537 if self.svfs.exists(b"journal"):
2535 self.ui.status(_(b"rolling back interrupted transaction\n"))
2538 self.ui.status(_(b"rolling back interrupted transaction\n"))
2536 vfsmap = {
2539 vfsmap = {
2537 b'': self.svfs,
2540 b'': self.svfs,
2538 b'plain': self.vfs,
2541 b'plain': self.vfs,
2539 }
2542 }
2540 transaction.rollback(
2543 transaction.rollback(
2541 self.svfs,
2544 self.svfs,
2542 vfsmap,
2545 vfsmap,
2543 b"journal",
2546 b"journal",
2544 self.ui.warn,
2547 self.ui.warn,
2545 checkambigfiles=_cachedfiles,
2548 checkambigfiles=_cachedfiles,
2546 )
2549 )
2547 self.invalidate()
2550 self.invalidate()
2548 return True
2551 return True
2549 else:
2552 else:
2550 self.ui.warn(_(b"no interrupted transaction available\n"))
2553 self.ui.warn(_(b"no interrupted transaction available\n"))
2551 return False
2554 return False
2552
2555
2553 def rollback(self, dryrun=False, force=False):
2556 def rollback(self, dryrun=False, force=False):
2554 wlock = lock = dsguard = None
2557 wlock = lock = dsguard = None
2555 try:
2558 try:
2556 wlock = self.wlock()
2559 wlock = self.wlock()
2557 lock = self.lock()
2560 lock = self.lock()
2558 if self.svfs.exists(b"undo"):
2561 if self.svfs.exists(b"undo"):
2559 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2562 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2560
2563
2561 return self._rollback(dryrun, force, dsguard)
2564 return self._rollback(dryrun, force, dsguard)
2562 else:
2565 else:
2563 self.ui.warn(_(b"no rollback information available\n"))
2566 self.ui.warn(_(b"no rollback information available\n"))
2564 return 1
2567 return 1
2565 finally:
2568 finally:
2566 release(dsguard, lock, wlock)
2569 release(dsguard, lock, wlock)
2567
2570
2568 @unfilteredmethod # Until we get smarter cache management
2571 @unfilteredmethod # Until we get smarter cache management
2569 def _rollback(self, dryrun, force, dsguard):
2572 def _rollback(self, dryrun, force, dsguard):
2570 ui = self.ui
2573 ui = self.ui
2571 try:
2574 try:
2572 args = self.vfs.read(b'undo.desc').splitlines()
2575 args = self.vfs.read(b'undo.desc').splitlines()
2573 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2576 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2574 if len(args) >= 3:
2577 if len(args) >= 3:
2575 detail = args[2]
2578 detail = args[2]
2576 oldtip = oldlen - 1
2579 oldtip = oldlen - 1
2577
2580
2578 if detail and ui.verbose:
2581 if detail and ui.verbose:
2579 msg = _(
2582 msg = _(
2580 b'repository tip rolled back to revision %d'
2583 b'repository tip rolled back to revision %d'
2581 b' (undo %s: %s)\n'
2584 b' (undo %s: %s)\n'
2582 ) % (oldtip, desc, detail)
2585 ) % (oldtip, desc, detail)
2583 else:
2586 else:
2584 msg = _(
2587 msg = _(
2585 b'repository tip rolled back to revision %d (undo %s)\n'
2588 b'repository tip rolled back to revision %d (undo %s)\n'
2586 ) % (oldtip, desc)
2589 ) % (oldtip, desc)
2587 except IOError:
2590 except IOError:
2588 msg = _(b'rolling back unknown transaction\n')
2591 msg = _(b'rolling back unknown transaction\n')
2589 desc = None
2592 desc = None
2590
2593
2591 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2594 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2592 raise error.Abort(
2595 raise error.Abort(
2593 _(
2596 _(
2594 b'rollback of last commit while not checked out '
2597 b'rollback of last commit while not checked out '
2595 b'may lose data'
2598 b'may lose data'
2596 ),
2599 ),
2597 hint=_(b'use -f to force'),
2600 hint=_(b'use -f to force'),
2598 )
2601 )
2599
2602
2600 ui.status(msg)
2603 ui.status(msg)
2601 if dryrun:
2604 if dryrun:
2602 return 0
2605 return 0
2603
2606
2604 parents = self.dirstate.parents()
2607 parents = self.dirstate.parents()
2605 self.destroying()
2608 self.destroying()
2606 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2609 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2607 transaction.rollback(
2610 transaction.rollback(
2608 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2611 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2609 )
2612 )
2610 bookmarksvfs = bookmarks.bookmarksvfs(self)
2613 bookmarksvfs = bookmarks.bookmarksvfs(self)
2611 if bookmarksvfs.exists(b'undo.bookmarks'):
2614 if bookmarksvfs.exists(b'undo.bookmarks'):
2612 bookmarksvfs.rename(
2615 bookmarksvfs.rename(
2613 b'undo.bookmarks', b'bookmarks', checkambig=True
2616 b'undo.bookmarks', b'bookmarks', checkambig=True
2614 )
2617 )
2615 if self.svfs.exists(b'undo.phaseroots'):
2618 if self.svfs.exists(b'undo.phaseroots'):
2616 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2619 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2617 self.invalidate()
2620 self.invalidate()
2618
2621
2619 has_node = self.changelog.index.has_node
2622 has_node = self.changelog.index.has_node
2620 parentgone = any(not has_node(p) for p in parents)
2623 parentgone = any(not has_node(p) for p in parents)
2621 if parentgone:
2624 if parentgone:
2622 # prevent dirstateguard from overwriting the already-restored one
2625 # prevent dirstateguard from overwriting the already-restored one
2623 dsguard.close()
2626 dsguard.close()
2624
2627
2625 narrowspec.restorebackup(self, b'undo.narrowspec')
2628 narrowspec.restorebackup(self, b'undo.narrowspec')
2626 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2629 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2627 self.dirstate.restorebackup(None, b'undo.dirstate')
2630 self.dirstate.restorebackup(None, b'undo.dirstate')
2628 try:
2631 try:
2629 branch = self.vfs.read(b'undo.branch')
2632 branch = self.vfs.read(b'undo.branch')
2630 self.dirstate.setbranch(encoding.tolocal(branch))
2633 self.dirstate.setbranch(encoding.tolocal(branch))
2631 except IOError:
2634 except IOError:
2632 ui.warn(
2635 ui.warn(
2633 _(
2636 _(
2634 b'named branch could not be reset: '
2637 b'named branch could not be reset: '
2635 b'current branch is still \'%s\'\n'
2638 b'current branch is still \'%s\'\n'
2636 )
2639 )
2637 % self.dirstate.branch()
2640 % self.dirstate.branch()
2638 )
2641 )
2639
2642
2640 parents = tuple([p.rev() for p in self[None].parents()])
2643 parents = tuple([p.rev() for p in self[None].parents()])
2641 if len(parents) > 1:
2644 if len(parents) > 1:
2642 ui.status(
2645 ui.status(
2643 _(
2646 _(
2644 b'working directory now based on '
2647 b'working directory now based on '
2645 b'revisions %d and %d\n'
2648 b'revisions %d and %d\n'
2646 )
2649 )
2647 % parents
2650 % parents
2648 )
2651 )
2649 else:
2652 else:
2650 ui.status(
2653 ui.status(
2651 _(b'working directory now based on revision %d\n') % parents
2654 _(b'working directory now based on revision %d\n') % parents
2652 )
2655 )
2653 mergestatemod.mergestate.clean(self)
2656 mergestatemod.mergestate.clean(self)
2654
2657
2655 # TODO: if we know which new heads may result from this rollback, pass
2658 # TODO: if we know which new heads may result from this rollback, pass
2656 # them to destroy(), which will prevent the branchhead cache from being
2659 # them to destroy(), which will prevent the branchhead cache from being
2657 # invalidated.
2660 # invalidated.
2658 self.destroyed()
2661 self.destroyed()
2659 return 0
2662 return 0
2660
2663
2661 def _buildcacheupdater(self, newtransaction):
2664 def _buildcacheupdater(self, newtransaction):
2662 """called during transaction to build the callback updating cache
2665 """called during transaction to build the callback updating cache
2663
2666
2664 Lives on the repository to help extensions that might want to augment
2667 Lives on the repository to help extensions that might want to augment
2665 this logic. For this purpose, the created transaction is passed to the
2668 this logic. For this purpose, the created transaction is passed to the
2666 method.
2669 method.
2667 """
2670 """
2668 # we must avoid cyclic reference between repo and transaction.
2671 # we must avoid cyclic reference between repo and transaction.
2669 reporef = weakref.ref(self)
2672 reporef = weakref.ref(self)
2670
2673
2671 def updater(tr):
2674 def updater(tr):
2672 repo = reporef()
2675 repo = reporef()
2673 repo.updatecaches(tr)
2676 repo.updatecaches(tr)
2674
2677
2675 return updater
2678 return updater
2676
2679
2677 @unfilteredmethod
2680 @unfilteredmethod
2678 def updatecaches(self, tr=None, full=False):
2681 def updatecaches(self, tr=None, full=False):
2679 """warm appropriate caches
2682 """warm appropriate caches
2680
2683
2681 If this function is called after a transaction has closed, the transaction
2684 If this function is called after a transaction has closed, the transaction
2682 will be available in the 'tr' argument. This can be used to selectively
2685 will be available in the 'tr' argument. This can be used to selectively
2683 update caches relevant to the changes in that transaction.
2686 update caches relevant to the changes in that transaction.
2684
2687
2685 If 'full' is set, make sure all caches the function knows about have
2688 If 'full' is set, make sure all caches the function knows about have
2686 up-to-date data, even the ones usually loaded more lazily.
2689 up-to-date data, even the ones usually loaded more lazily.
2687 """
2690 """
2688 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2691 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2689 # During strip, many caches are invalid but
2692 # During strip, many caches are invalid but
2690 # later call to `destroyed` will refresh them.
2693 # later call to `destroyed` will refresh them.
2691 return
2694 return
2692
2695
2693 if tr is None or tr.changes[b'origrepolen'] < len(self):
2696 if tr is None or tr.changes[b'origrepolen'] < len(self):
2694 # accessing the 'served' branchmap should refresh all the others,
2697 # accessing the 'served' branchmap should refresh all the others,
2695 self.ui.debug(b'updating the branch cache\n')
2698 self.ui.debug(b'updating the branch cache\n')
2696 self.filtered(b'served').branchmap()
2699 self.filtered(b'served').branchmap()
2697 self.filtered(b'served.hidden').branchmap()
2700 self.filtered(b'served.hidden').branchmap()
2698
2701
2699 if full:
2702 if full:
2700 unfi = self.unfiltered()
2703 unfi = self.unfiltered()
2701
2704
2702 self.changelog.update_caches(transaction=tr)
2705 self.changelog.update_caches(transaction=tr)
2703 self.manifestlog.update_caches(transaction=tr)
2706 self.manifestlog.update_caches(transaction=tr)
2704
2707
2705 rbc = unfi.revbranchcache()
2708 rbc = unfi.revbranchcache()
2706 for r in unfi.changelog:
2709 for r in unfi.changelog:
2707 rbc.branchinfo(r)
2710 rbc.branchinfo(r)
2708 rbc.write()
2711 rbc.write()
2709
2712
2710 # ensure the working copy parents are in the manifestfulltextcache
2713 # ensure the working copy parents are in the manifestfulltextcache
2711 for ctx in self[b'.'].parents():
2714 for ctx in self[b'.'].parents():
2712 ctx.manifest() # accessing the manifest is enough
2715 ctx.manifest() # accessing the manifest is enough
2713
2716
2714 # accessing fnode cache warms the cache
2717 # accessing fnode cache warms the cache
2715 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2718 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2716 # accessing tags warms the cache
2719 # accessing tags warms the cache
2717 self.tags()
2720 self.tags()
2718 self.filtered(b'served').tags()
2721 self.filtered(b'served').tags()
2719
2722
2720 # The `full` arg is documented as updating even the lazily-loaded
2723 # The `full` arg is documented as updating even the lazily-loaded
2721 # caches immediately, so we're forcing a write to cause these caches
2724 # caches immediately, so we're forcing a write to cause these caches
2722 # to be warmed up even if they haven't explicitly been requested
2725 # to be warmed up even if they haven't explicitly been requested
2723 # yet (if they've never been used by hg, they won't ever have been
2726 # yet (if they've never been used by hg, they won't ever have been
2724 # written, even if they're a subset of another kind of cache that
2727 # written, even if they're a subset of another kind of cache that
2725 # *has* been used).
2728 # *has* been used).
2726 for filt in repoview.filtertable.keys():
2729 for filt in repoview.filtertable.keys():
2727 filtered = self.filtered(filt)
2730 filtered = self.filtered(filt)
2728 filtered.branchmap().write(filtered)
2731 filtered.branchmap().write(filtered)
2729
2732
2730 def invalidatecaches(self):
2733 def invalidatecaches(self):
2731
2734
2732 if '_tagscache' in vars(self):
2735 if '_tagscache' in vars(self):
2733 # can't use delattr on proxy
2736 # can't use delattr on proxy
2734 del self.__dict__['_tagscache']
2737 del self.__dict__['_tagscache']
2735
2738
2736 self._branchcaches.clear()
2739 self._branchcaches.clear()
2737 self.invalidatevolatilesets()
2740 self.invalidatevolatilesets()
2738 self._sparsesignaturecache.clear()
2741 self._sparsesignaturecache.clear()
2739
2742
2740 def invalidatevolatilesets(self):
2743 def invalidatevolatilesets(self):
2741 self.filteredrevcache.clear()
2744 self.filteredrevcache.clear()
2742 obsolete.clearobscaches(self)
2745 obsolete.clearobscaches(self)
2743 self._quick_access_changeid_invalidate()
2746 self._quick_access_changeid_invalidate()
2744
2747
2745 def invalidatedirstate(self):
2748 def invalidatedirstate(self):
2746 """Invalidates the dirstate, causing the next call to dirstate
2749 """Invalidates the dirstate, causing the next call to dirstate
2747 to check if it was modified since the last time it was read,
2750 to check if it was modified since the last time it was read,
2748 rereading it if it has.
2751 rereading it if it has.
2749
2752
2750 This is different from dirstate.invalidate() in that it doesn't always
2753 This is different from dirstate.invalidate() in that it doesn't always
2751 reread the dirstate. Use dirstate.invalidate() if you want to
2754 reread the dirstate. Use dirstate.invalidate() if you want to
2752 explicitly read the dirstate again (i.e. restoring it to a previous
2755 explicitly read the dirstate again (i.e. restoring it to a previous
2753 known good state)."""
2756 known good state)."""
2754 if hasunfilteredcache(self, 'dirstate'):
2757 if hasunfilteredcache(self, 'dirstate'):
2755 for k in self.dirstate._filecache:
2758 for k in self.dirstate._filecache:
2756 try:
2759 try:
2757 delattr(self.dirstate, k)
2760 delattr(self.dirstate, k)
2758 except AttributeError:
2761 except AttributeError:
2759 pass
2762 pass
2760 delattr(self.unfiltered(), 'dirstate')
2763 delattr(self.unfiltered(), 'dirstate')
2761
2764
2762 def invalidate(self, clearfilecache=False):
2765 def invalidate(self, clearfilecache=False):
2763 """Invalidates both store and non-store parts other than dirstate
2766 """Invalidates both store and non-store parts other than dirstate
2764
2767
2765 If a transaction is running, invalidation of store is omitted,
2768 If a transaction is running, invalidation of store is omitted,
2766 because discarding in-memory changes might cause inconsistency
2769 because discarding in-memory changes might cause inconsistency
2767 (e.g. an incomplete fncache causes unintentional failure, but a
2770 (e.g. an incomplete fncache causes unintentional failure, but a
2768 redundant one doesn't).
2771 redundant one doesn't).
2769 """
2772 """
2770 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2773 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2771 for k in list(self._filecache.keys()):
2774 for k in list(self._filecache.keys()):
2772 # dirstate is invalidated separately in invalidatedirstate()
2775 # dirstate is invalidated separately in invalidatedirstate()
2773 if k == b'dirstate':
2776 if k == b'dirstate':
2774 continue
2777 continue
2775 if (
2778 if (
2776 k == b'changelog'
2779 k == b'changelog'
2777 and self.currenttransaction()
2780 and self.currenttransaction()
2778 and self.changelog._delayed
2781 and self.changelog._delayed
2779 ):
2782 ):
2780 # The changelog object may store unwritten revisions. We don't
2783 # The changelog object may store unwritten revisions. We don't
2781 # want to lose them.
2784 # want to lose them.
2782 # TODO: Solve the problem instead of working around it.
2785 # TODO: Solve the problem instead of working around it.
2783 continue
2786 continue
2784
2787
2785 if clearfilecache:
2788 if clearfilecache:
2786 del self._filecache[k]
2789 del self._filecache[k]
2787 try:
2790 try:
2788 delattr(unfiltered, k)
2791 delattr(unfiltered, k)
2789 except AttributeError:
2792 except AttributeError:
2790 pass
2793 pass
2791 self.invalidatecaches()
2794 self.invalidatecaches()
2792 if not self.currenttransaction():
2795 if not self.currenttransaction():
2793 # TODO: Changing contents of store outside transaction
2796 # TODO: Changing contents of store outside transaction
2794 # causes inconsistency. We should make in-memory store
2797 # causes inconsistency. We should make in-memory store
2795 # changes detectable, and abort if changed.
2798 # changes detectable, and abort if changed.
2796 self.store.invalidatecaches()
2799 self.store.invalidatecaches()
2797
2800
2798 def invalidateall(self):
2801 def invalidateall(self):
2799 """Fully invalidates both store and non-store parts, causing the
2802 """Fully invalidates both store and non-store parts, causing the
2800 subsequent operation to reread any outside changes."""
2803 subsequent operation to reread any outside changes."""
2801 # extensions should hook this to invalidate their caches
2804 # extensions should hook this to invalidate their caches
2802 self.invalidate()
2805 self.invalidate()
2803 self.invalidatedirstate()
2806 self.invalidatedirstate()
2804
2807
2805 @unfilteredmethod
2808 @unfilteredmethod
2806 def _refreshfilecachestats(self, tr):
2809 def _refreshfilecachestats(self, tr):
2807 """Reload stats of cached files so that they are flagged as valid"""
2810 """Reload stats of cached files so that they are flagged as valid"""
2808 for k, ce in self._filecache.items():
2811 for k, ce in self._filecache.items():
2809 k = pycompat.sysstr(k)
2812 k = pycompat.sysstr(k)
2810 if k == 'dirstate' or k not in self.__dict__:
2813 if k == 'dirstate' or k not in self.__dict__:
2811 continue
2814 continue
2812 ce.refresh()
2815 ce.refresh()
2813
2816
2814 def _lock(
2817 def _lock(
2815 self,
2818 self,
2816 vfs,
2819 vfs,
2817 lockname,
2820 lockname,
2818 wait,
2821 wait,
2819 releasefn,
2822 releasefn,
2820 acquirefn,
2823 acquirefn,
2821 desc,
2824 desc,
2822 ):
2825 ):
2823 timeout = 0
2826 timeout = 0
2824 warntimeout = 0
2827 warntimeout = 0
2825 if wait:
2828 if wait:
2826 timeout = self.ui.configint(b"ui", b"timeout")
2829 timeout = self.ui.configint(b"ui", b"timeout")
2827 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2830 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2828 # internal config: ui.signal-safe-lock
2831 # internal config: ui.signal-safe-lock
2829 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2832 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2830
2833
2831 l = lockmod.trylock(
2834 l = lockmod.trylock(
2832 self.ui,
2835 self.ui,
2833 vfs,
2836 vfs,
2834 lockname,
2837 lockname,
2835 timeout,
2838 timeout,
2836 warntimeout,
2839 warntimeout,
2837 releasefn=releasefn,
2840 releasefn=releasefn,
2838 acquirefn=acquirefn,
2841 acquirefn=acquirefn,
2839 desc=desc,
2842 desc=desc,
2840 signalsafe=signalsafe,
2843 signalsafe=signalsafe,
2841 )
2844 )
2842 return l
2845 return l
2843
2846
2844 def _afterlock(self, callback):
2847 def _afterlock(self, callback):
2845 """add a callback to be run when the repository is fully unlocked
2848 """add a callback to be run when the repository is fully unlocked
2846
2849
2847 The callback will be executed when the outermost lock is released
2850 The callback will be executed when the outermost lock is released
2848 (with wlock being higher level than 'lock')."""
2851 (with wlock being higher level than 'lock')."""
2849 for ref in (self._wlockref, self._lockref):
2852 for ref in (self._wlockref, self._lockref):
2850 l = ref and ref()
2853 l = ref and ref()
2851 if l and l.held:
2854 if l and l.held:
2852 l.postrelease.append(callback)
2855 l.postrelease.append(callback)
2853 break
2856 break
2854 else: # no lock has been found.
2857 else: # no lock has been found.
2855 callback(True)
2858 callback(True)
2856
2859
2857 def lock(self, wait=True):
2860 def lock(self, wait=True):
2858 """Lock the repository store (.hg/store) and return a weak reference
2861 """Lock the repository store (.hg/store) and return a weak reference
2859 to the lock. Use this before modifying the store (e.g. committing or
2862 to the lock. Use this before modifying the store (e.g. committing or
2860 stripping). If you are opening a transaction, get a lock as well.
2863 stripping). If you are opening a transaction, get a lock as well.
2861
2864
2862 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2865 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2863 'wlock' first to avoid a deadlock hazard."""
2866 'wlock' first to avoid a deadlock hazard."""
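# Illustrative usage of the documented ordering (commit() below follows
# the same pattern)::
#
#     with repo.wlock(), repo.lock():
#         ...  # safe to touch both the store and the working copy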
2864 l = self._currentlock(self._lockref)
2867 l = self._currentlock(self._lockref)
2865 if l is not None:
2868 if l is not None:
2866 l.lock()
2869 l.lock()
2867 return l
2870 return l
2868
2871
2869 l = self._lock(
2872 l = self._lock(
2870 vfs=self.svfs,
2873 vfs=self.svfs,
2871 lockname=b"lock",
2874 lockname=b"lock",
2872 wait=wait,
2875 wait=wait,
2873 releasefn=None,
2876 releasefn=None,
2874 acquirefn=self.invalidate,
2877 acquirefn=self.invalidate,
2875 desc=_(b'repository %s') % self.origroot,
2878 desc=_(b'repository %s') % self.origroot,
2876 )
2879 )
2877 self._lockref = weakref.ref(l)
2880 self._lockref = weakref.ref(l)
2878 return l
2881 return l
2879
2882
2880 def wlock(self, wait=True):
2883 def wlock(self, wait=True):
2881 """Lock the non-store parts of the repository (everything under
2884 """Lock the non-store parts of the repository (everything under
2882 .hg except .hg/store) and return a weak reference to the lock.
2885 .hg except .hg/store) and return a weak reference to the lock.
2883
2886
2884 Use this before modifying files in .hg.
2887 Use this before modifying files in .hg.
2885
2888
2886 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2889 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2887 'wlock' first to avoid a deadlock hazard."""
2890 'wlock' first to avoid a deadlock hazard."""
2888 l = self._wlockref and self._wlockref()
2891 l = self._wlockref and self._wlockref()
2889 if l is not None and l.held:
2892 if l is not None and l.held:
2890 l.lock()
2893 l.lock()
2891 return l
2894 return l
2892
2895
2893 # We do not need to check for non-waiting lock acquisition. Such
2896 # We do not need to check for non-waiting lock acquisition. Such
2894 # acquisitions would not cause a deadlock, as they would just fail.
2897 # acquisitions would not cause a deadlock, as they would just fail.
2895 if wait and (
2898 if wait and (
2896 self.ui.configbool(b'devel', b'all-warnings')
2899 self.ui.configbool(b'devel', b'all-warnings')
2897 or self.ui.configbool(b'devel', b'check-locks')
2900 or self.ui.configbool(b'devel', b'check-locks')
2898 ):
2901 ):
2899 if self._currentlock(self._lockref) is not None:
2902 if self._currentlock(self._lockref) is not None:
2900 self.ui.develwarn(b'"wlock" acquired after "lock"')
2903 self.ui.develwarn(b'"wlock" acquired after "lock"')
2901
2904
2902 def unlock():
2905 def unlock():
2903 if self.dirstate.pendingparentchange():
2906 if self.dirstate.pendingparentchange():
2904 self.dirstate.invalidate()
2907 self.dirstate.invalidate()
2905 else:
2908 else:
2906 self.dirstate.write(None)
2909 self.dirstate.write(None)
2907
2910
2908 self._filecache[b'dirstate'].refresh()
2911 self._filecache[b'dirstate'].refresh()
2909
2912
2910 l = self._lock(
2913 l = self._lock(
2911 self.vfs,
2914 self.vfs,
2912 b"wlock",
2915 b"wlock",
2913 wait,
2916 wait,
2914 unlock,
2917 unlock,
2915 self.invalidatedirstate,
2918 self.invalidatedirstate,
2916 _(b'working directory of %s') % self.origroot,
2919 _(b'working directory of %s') % self.origroot,
2917 )
2920 )
2918 self._wlockref = weakref.ref(l)
2921 self._wlockref = weakref.ref(l)
2919 return l
2922 return l
2920
2923
2921 def _currentlock(self, lockref):
2924 def _currentlock(self, lockref):
2922 """Returns the lock if it's held, or None if it's not."""
2925 """Returns the lock if it's held, or None if it's not."""
2923 if lockref is None:
2926 if lockref is None:
2924 return None
2927 return None
2925 l = lockref()
2928 l = lockref()
2926 if l is None or not l.held:
2929 if l is None or not l.held:
2927 return None
2930 return None
2928 return l
2931 return l
2929
2932
2930 def currentwlock(self):
2933 def currentwlock(self):
2931 """Returns the wlock if it's held, or None if it's not."""
2934 """Returns the wlock if it's held, or None if it's not."""
2932 return self._currentlock(self._wlockref)
2935 return self._currentlock(self._wlockref)
2933
2936
2934 def checkcommitpatterns(self, wctx, match, status, fail):
2937 def checkcommitpatterns(self, wctx, match, status, fail):
2935 """check for commit arguments that aren't committable"""
2938 """check for commit arguments that aren't committable"""
2936 if match.isexact() or match.prefix():
2939 if match.isexact() or match.prefix():
2937 matched = set(status.modified + status.added + status.removed)
2940 matched = set(status.modified + status.added + status.removed)
2938
2941
2939 for f in match.files():
2942 for f in match.files():
2940 f = self.dirstate.normalize(f)
2943 f = self.dirstate.normalize(f)
2941 if f == b'.' or f in matched or f in wctx.substate:
2944 if f == b'.' or f in matched or f in wctx.substate:
2942 continue
2945 continue
2943 if f in status.deleted:
2946 if f in status.deleted:
2944 fail(f, _(b'file not found!'))
2947 fail(f, _(b'file not found!'))
2945 # Is it a directory that exists or used to exist?
2948 # Is it a directory that exists or used to exist?
2946 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2949 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2947 d = f + b'/'
2950 d = f + b'/'
2948 for mf in matched:
2951 for mf in matched:
2949 if mf.startswith(d):
2952 if mf.startswith(d):
2950 break
2953 break
2951 else:
2954 else:
2952 fail(f, _(b"no match under directory!"))
2955 fail(f, _(b"no match under directory!"))
2953 elif f not in self.dirstate:
2956 elif f not in self.dirstate:
2954 fail(f, _(b"file not tracked!"))
2957 fail(f, _(b"file not tracked!"))
2955
2958
2956 @unfilteredmethod
2959 @unfilteredmethod
2957 def commit(
2960 def commit(
2958 self,
2961 self,
2959 text=b"",
2962 text=b"",
2960 user=None,
2963 user=None,
2961 date=None,
2964 date=None,
2962 match=None,
2965 match=None,
2963 force=False,
2966 force=False,
2964 editor=None,
2967 editor=None,
2965 extra=None,
2968 extra=None,
2966 ):
2969 ):
2967 """Add a new revision to current repository.
2970 """Add a new revision to current repository.
2968
2971
2969 Revision information is gathered from the working directory;
2972 Revision information is gathered from the working directory;
2970 match can be used to filter the committed files. If editor is
2973 match can be used to filter the committed files. If editor is
2971 supplied, it is called to get a commit message.
2974 supplied, it is called to get a commit message.
2972 """
2975 """
2973 if extra is None:
2976 if extra is None:
2974 extra = {}
2977 extra = {}
2975
2978
2976 def fail(f, msg):
2979 def fail(f, msg):
2977 raise error.InputError(b'%s: %s' % (f, msg))
2980 raise error.InputError(b'%s: %s' % (f, msg))
2978
2981
2979 if not match:
2982 if not match:
2980 match = matchmod.always()
2983 match = matchmod.always()
2981
2984
2982 if not force:
2985 if not force:
2983 match.bad = fail
2986 match.bad = fail
2984
2987
2985 # lock() for recent changelog (see issue4368)
2988 # lock() for recent changelog (see issue4368)
2986 with self.wlock(), self.lock():
2989 with self.wlock(), self.lock():
2987 wctx = self[None]
2990 wctx = self[None]
2988 merge = len(wctx.parents()) > 1
2991 merge = len(wctx.parents()) > 1
2989
2992
2990 if not force and merge and not match.always():
2993 if not force and merge and not match.always():
2991 raise error.Abort(
2994 raise error.Abort(
2992 _(
2995 _(
2993 b'cannot partially commit a merge '
2996 b'cannot partially commit a merge '
2994 b'(do not specify files or patterns)'
2997 b'(do not specify files or patterns)'
2995 )
2998 )
2996 )
2999 )
2997
3000
2998 status = self.status(match=match, clean=force)
3001 status = self.status(match=match, clean=force)
2999 if force:
3002 if force:
3000 status.modified.extend(
3003 status.modified.extend(
3001 status.clean
3004 status.clean
3002 ) # mq may commit clean files
3005 ) # mq may commit clean files
3003
3006
3004 # check subrepos
3007 # check subrepos
3005 subs, commitsubs, newstate = subrepoutil.precommit(
3008 subs, commitsubs, newstate = subrepoutil.precommit(
3006 self.ui, wctx, status, match, force=force
3009 self.ui, wctx, status, match, force=force
3007 )
3010 )
3008
3011
3009 # make sure all explicit patterns are matched
3012 # make sure all explicit patterns are matched
3010 if not force:
3013 if not force:
3011 self.checkcommitpatterns(wctx, match, status, fail)
3014 self.checkcommitpatterns(wctx, match, status, fail)
3012
3015
3013 cctx = context.workingcommitctx(
3016 cctx = context.workingcommitctx(
3014 self, status, text, user, date, extra
3017 self, status, text, user, date, extra
3015 )
3018 )
3016
3019
3017 ms = mergestatemod.mergestate.read(self)
3020 ms = mergestatemod.mergestate.read(self)
3018 mergeutil.checkunresolved(ms)
3021 mergeutil.checkunresolved(ms)
3019
3022
3020 # internal config: ui.allowemptycommit
3023 # internal config: ui.allowemptycommit
3021 if cctx.isempty() and not self.ui.configbool(
3024 if cctx.isempty() and not self.ui.configbool(
3022 b'ui', b'allowemptycommit'
3025 b'ui', b'allowemptycommit'
3023 ):
3026 ):
3024 self.ui.debug(b'nothing to commit, clearing merge state\n')
3027 self.ui.debug(b'nothing to commit, clearing merge state\n')
3025 ms.reset()
3028 ms.reset()
3026 return None
3029 return None
3027
3030
3028 if merge and cctx.deleted():
3031 if merge and cctx.deleted():
3029 raise error.Abort(_(b"cannot commit merge with missing files"))
3032 raise error.Abort(_(b"cannot commit merge with missing files"))
3030
3033
3031 if editor:
3034 if editor:
3032 cctx._text = editor(self, cctx, subs)
3035 cctx._text = editor(self, cctx, subs)
3033 edited = text != cctx._text
3036 edited = text != cctx._text
3034
3037
3035 # Save commit message in case this transaction gets rolled back
3038 # Save commit message in case this transaction gets rolled back
3036 # (e.g. by a pretxncommit hook). Leave the content alone on
3039 # (e.g. by a pretxncommit hook). Leave the content alone on
3037 # the assumption that the user will use the same editor again.
3040 # the assumption that the user will use the same editor again.
3038 msgfn = self.savecommitmessage(cctx._text)
3041 msgfn = self.savecommitmessage(cctx._text)
3039
3042
3040 # commit subs and write new state
3043 # commit subs and write new state
3041 if subs:
3044 if subs:
3042 uipathfn = scmutil.getuipathfn(self)
3045 uipathfn = scmutil.getuipathfn(self)
3043 for s in sorted(commitsubs):
3046 for s in sorted(commitsubs):
3044 sub = wctx.sub(s)
3047 sub = wctx.sub(s)
3045 self.ui.status(
3048 self.ui.status(
3046 _(b'committing subrepository %s\n')
3049 _(b'committing subrepository %s\n')
3047 % uipathfn(subrepoutil.subrelpath(sub))
3050 % uipathfn(subrepoutil.subrelpath(sub))
3048 )
3051 )
3049 sr = sub.commit(cctx._text, user, date)
3052 sr = sub.commit(cctx._text, user, date)
3050 newstate[s] = (newstate[s][0], sr)
3053 newstate[s] = (newstate[s][0], sr)
3051 subrepoutil.writestate(self, newstate)
3054 subrepoutil.writestate(self, newstate)
3052
3055
3053 p1, p2 = self.dirstate.parents()
3056 p1, p2 = self.dirstate.parents()
3054 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3057 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
3055 try:
3058 try:
3056 self.hook(
3059 self.hook(
3057 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3060 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3058 )
3061 )
3059 with self.transaction(b'commit'):
3062 with self.transaction(b'commit'):
3060 ret = self.commitctx(cctx, True)
3063 ret = self.commitctx(cctx, True)
3061 # update bookmarks, dirstate and mergestate
3064 # update bookmarks, dirstate and mergestate
3062 bookmarks.update(self, [p1, p2], ret)
3065 bookmarks.update(self, [p1, p2], ret)
3063 cctx.markcommitted(ret)
3066 cctx.markcommitted(ret)
3064 ms.reset()
3067 ms.reset()
3065 except: # re-raises
3068 except: # re-raises
3066 if edited:
3069 if edited:
3067 self.ui.write(
3070 self.ui.write(
3068 _(b'note: commit message saved in %s\n') % msgfn
3071 _(b'note: commit message saved in %s\n') % msgfn
3069 )
3072 )
3070 self.ui.write(
3073 self.ui.write(
3071 _(
3074 _(
3072 b"note: use 'hg commit --logfile "
3075 b"note: use 'hg commit --logfile "
3073 b".hg/last-message.txt --edit' to reuse it\n"
3076 b".hg/last-message.txt --edit' to reuse it\n"
3074 )
3077 )
3075 )
3078 )
3076 raise
3079 raise
3077
3080
3078 def commithook(unused_success):
3081 def commithook(unused_success):
3079 # hack for commands that use a temporary commit (e.g. histedit):
3082 # hack for commands that use a temporary commit (e.g. histedit):
3080 # the temporary commit may have been stripped before the hook runs
3083 # the temporary commit may have been stripped before the hook runs
3081 if self.changelog.hasnode(ret):
3084 if self.changelog.hasnode(ret):
3082 self.hook(
3085 self.hook(
3083 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3086 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3084 )
3087 )
3085
3088
3086 self._afterlock(commithook)
3089 self._afterlock(commithook)
3087 return ret
3090 return ret
3088
3091
3089 @unfilteredmethod
3092 @unfilteredmethod
3090 def commitctx(self, ctx, error=False, origctx=None):
3093 def commitctx(self, ctx, error=False, origctx=None):
3091 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3094 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3092
3095
3093 @unfilteredmethod
3096 @unfilteredmethod
3094 def destroying(self):
3097 def destroying(self):
3095 """Inform the repository that nodes are about to be destroyed.
3098 """Inform the repository that nodes are about to be destroyed.
3096 Intended for use by strip and rollback, so there's a common
3099 Intended for use by strip and rollback, so there's a common
3097 place for anything that has to be done before destroying history.
3100 place for anything that has to be done before destroying history.
3098
3101
3099 This is mostly useful for saving state that is in memory and waiting
3102 This is mostly useful for saving state that is in memory and waiting
3100 to be flushed when the current lock is released. Because a call to
3103 to be flushed when the current lock is released. Because a call to
3101 destroyed is imminent, the repo will be invalidated, causing those
3104 destroyed is imminent, the repo will be invalidated, causing those
3102 changes to stay in memory (waiting for the next unlock), or vanish
3105 changes to stay in memory (waiting for the next unlock), or vanish
3103 completely.
3106 completely.
3104 """
3107 """
3105 # When using the same lock to commit and strip, the phasecache is left
3108 # When using the same lock to commit and strip, the phasecache is left
3106 # dirty after committing. Then when we strip, the repo is invalidated,
3109 # dirty after committing. Then when we strip, the repo is invalidated,
3107 # causing those changes to disappear.
3110 # causing those changes to disappear.
3108 if '_phasecache' in vars(self):
3111 if '_phasecache' in vars(self):
3109 self._phasecache.write()
3112 self._phasecache.write()
3110
3113
3111 @unfilteredmethod
3114 @unfilteredmethod
3112 def destroyed(self):
3115 def destroyed(self):
3113 """Inform the repository that nodes have been destroyed.
3116 """Inform the repository that nodes have been destroyed.
3114 Intended for use by strip and rollback, so there's a common
3117 Intended for use by strip and rollback, so there's a common
3115 place for anything that has to be done after destroying history.
3118 place for anything that has to be done after destroying history.
3116 """
3119 """
3117 # When one tries to:
3120 # When one tries to:
3118 # 1) destroy nodes thus calling this method (e.g. strip)
3121 # 1) destroy nodes thus calling this method (e.g. strip)
3119 # 2) use phasecache somewhere (e.g. commit)
3122 # 2) use phasecache somewhere (e.g. commit)
3120 #
3123 #
3121 # then 2) will fail because the phasecache contains nodes that were
3124 # then 2) will fail because the phasecache contains nodes that were
3122 # removed. We can either remove phasecache from the filecache,
3125 # removed. We can either remove phasecache from the filecache,
3123 # causing it to reload next time it is accessed, or simply filter
3126 # causing it to reload next time it is accessed, or simply filter
3124 # the removed nodes now and write the updated cache.
3127 # the removed nodes now and write the updated cache.
3125 self._phasecache.filterunknown(self)
3128 self._phasecache.filterunknown(self)
3126 self._phasecache.write()
3129 self._phasecache.write()
3127
3130
3128 # refresh all repository caches
3131 # refresh all repository caches
3129 self.updatecaches()
3132 self.updatecaches()
3130
3133
3131 # Ensure the persistent tag cache is updated. Doing it now
3134 # Ensure the persistent tag cache is updated. Doing it now
3132 # means that the tag cache only has to worry about destroyed
3135 # means that the tag cache only has to worry about destroyed
3133 # heads immediately after a strip/rollback. That in turn
3136 # heads immediately after a strip/rollback. That in turn
3134 # guarantees that "cachetip == currenttip" (comparing both rev
3137 # guarantees that "cachetip == currenttip" (comparing both rev
3135 # and node) always means no nodes have been added or destroyed.
3138 # and node) always means no nodes have been added or destroyed.
3136
3139
3137 # XXX this is suboptimal when qrefresh'ing: we strip the current
3140 # XXX this is suboptimal when qrefresh'ing: we strip the current
3138 # head, refresh the tag cache, then immediately add a new head.
3141 # head, refresh the tag cache, then immediately add a new head.
3139 # But I think doing it this way is necessary for the "instant
3142 # But I think doing it this way is necessary for the "instant
3140 # tag cache retrieval" case to work.
3143 # tag cache retrieval" case to work.
3141 self.invalidate()
3144 self.invalidate()
3142
3145
3143 def status(
3146 def status(
3144 self,
3147 self,
3145 node1=b'.',
3148 node1=b'.',
3146 node2=None,
3149 node2=None,
3147 match=None,
3150 match=None,
3148 ignored=False,
3151 ignored=False,
3149 clean=False,
3152 clean=False,
3150 unknown=False,
3153 unknown=False,
3151 listsubrepos=False,
3154 listsubrepos=False,
3152 ):
3155 ):
3153 '''a convenience method that calls node1.status(node2)'''
3156 '''a convenience method that calls node1.status(node2)'''
3154 return self[node1].status(
3157 return self[node1].status(
3155 node2, match, ignored, clean, unknown, listsubrepos
3158 node2, match, ignored, clean, unknown, listsubrepos
3156 )
3159 )
3157
3160
3158 def addpostdsstatus(self, ps):
3161 def addpostdsstatus(self, ps):
3159 """Add a callback to run within the wlock, at the point at which status
3162 """Add a callback to run within the wlock, at the point at which status
3160 fixups happen.
3163 fixups happen.
3161
3164
3162 On status completion, callback(wctx, status) will be called with the
3165 On status completion, callback(wctx, status) will be called with the
3163 wlock held, unless the dirstate has changed from underneath or the wlock
3166 wlock held, unless the dirstate has changed from underneath or the wlock
3164 couldn't be grabbed.
3167 couldn't be grabbed.
3165
3168
3166 Callbacks should not capture and use a cached copy of the dirstate --
3169 Callbacks should not capture and use a cached copy of the dirstate --
3167 it might change in the meantime. Instead, they should access the
3170 it might change in the meantime. Instead, they should access the
3168 dirstate via wctx.repo().dirstate.
3171 dirstate via wctx.repo().dirstate.
3169
3172
3170 This list is emptied out after each status run -- extensions should
3173 This list is emptied out after each status run -- extensions should
3171 make sure they add to this list each time dirstate.status is called.
3174 make sure they add to this list each time dirstate.status is called.
3172 Extensions should also make sure they don't call this for statuses
3175 Extensions should also make sure they don't call this for statuses
3173 that don't involve the dirstate.
3176 that don't involve the dirstate.
3174 """
3177 """
3175
3178
3176 # The list is located here for uniqueness reasons -- it is actually
3179 # The list is located here for uniqueness reasons -- it is actually
3177 # managed by the workingctx, but that isn't unique per-repo.
3180 # managed by the workingctx, but that isn't unique per-repo.
3178 self._postdsstatus.append(ps)
3181 self._postdsstatus.append(ps)
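# Illustrative registration (hypothetical callback, not part of this
# module)::
#
#     def fixups_done(wctx, status):
#         ...  # runs with the wlock held once status fixups happen
#
#     repo.addpostdsstatus(fixups_done)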
3179
3182
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

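# A usage sketch (not part of the original module): the helper and its name
# are hypothetical, but heads()/branchheads() are the methods defined above.
# Both return raw 20-byte changelog nodes ordered newest to oldest.
from mercurial.node import short

def _describeheads(repo):
    # all topological heads of the repository
    for node in repo.heads():
        repo.ui.write(b'head: %s\n' % short(node))
    # heads of the 'default' named branch, open heads only
    for node in repo.branchheads(b'default'):
        repo.ui.write(b'default head: %s\n' % short(node))
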
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first parents until we hit a merge or a root; each
            # entry records the starting node and the branch point found
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk the first-parent chain from top towards bottom, sampling
            # nodes at exponentially growing distances (1, 2, 4, 8, ...) so
            # callers can narrow down long ranges cheaply
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

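# A self-contained sketch (not from the original file) of the sampling
# pattern used by between() above, run on offsets instead of changelog
# nodes; the function name is illustrative only.
def _samplepositions(chainlength):
    """Return the offsets between() would record: 1, 2, 4, 8, ..."""
    positions, step = [], 1
    for i in range(1, chainlength):
        if i == step:
            positions.append(i)
            step *= 2
    return positions

# _samplepositions(20) -> [1, 2, 4, 8, 16]
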
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (exposing repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

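# A hedged usage sketch (not part of the original module): pushkey values
# are bytes; for the 'bookmarks' namespace the new value is a hex node and
# an empty old value means "create". The bookmark name below is made up.
from mercurial.node import hex

def _publishbookmark(repo, node):
    # advertise available pushkey namespaces, then push one bookmark key
    namespaces = repo.listkeys(b'namespaces')
    assert b'bookmarks' in namespaces
    return repo.pushkey(b'bookmarks', b'example-book', b'', hex(node))
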
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


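# A hedged usage sketch (not from the original file): callers typically
# save the message before an operation that may fail, so the user can
# recover it from .hg/last-message.txt; the surrounding helper is made up.
def _commitwithrecovery(repo, text):
    msgfn = repo.savecommitmessage(text)
    try:
        return repo.commit(text=text)
    except Exception:
        repo.ui.warn(b'note: commit message saved in %s\n' % msgfn)
        raise

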
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements


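# A hedged sketch (not part of the original module) of the wrapping the
# docstring above describes; the extension module, its uisetup hook, and
# the custom requirement name are hypothetical, but extensions.wrapfunction
# is the standard Mercurial wrapping API.
from mercurial import extensions, localrepo

def _wrapnewreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    # tag new repositories with a (made-up) extension-specific requirement
    requirements.add(b'exp-example-extension')
    return requirements

def uisetup(ui):
    extensions.wrapfunction(
        localrepo, 'newreporequirements', _wrapnewreporequirements
    )

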
def checkrequirementscompat(ui, requirements):
    """Check compatibility of enabled and disabled repository requirements.

    Returns the set of requirements that need to be dropped because the
    requirements they depend on are not enabled. Also warns the user about
    each of them."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


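# A hedged sketch (not from the original file), mirroring the
# newreporequirements wrapper above: an extension claims its own (made-up)
# creation option so createrepository() does not reject it as unknown.
def _filtercreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    # 'exp-example-opt' is a hypothetical option this extension handles
    unknown.pop(b'exp-example-opt', None)
    return unknown

# registered the same way, e.g. in extsetup():
#     extensions.wrapfunction(
#         localrepo, 'filterknowncreateopts', _filtercreateopts
#     )

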
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository is not a
    # shared one, write the store requirements. For a new shared repository
    # we don't need to write them, as they are already present in the
    # source repository's store requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


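# A hedged usage sketch (not part of the original module): create a fresh
# repository on disk and then open it; the path is made up, and in real
# code instance() above performs exactly this create-then-open sequence.
from mercurial import ui as uimod

def _makeexamplerepo():
    ui = uimod.ui.load()
    createrepository(ui, b'/tmp/example-repo', createopts={b'lfs': True})
    return makelocalrepository(ui, b'/tmp/example-repo')

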
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
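
# A hedged sketch (not from the original file) of the poisoned behaviour:
# after poisonrepository(repo), close() still works but any other attribute
# access raises; 'repo' here stands in for a live repository object.
def _demonstratepoison(repo):
    poisonrepository(repo)
    repo.close()  # still allowed, deliberately a no-op
    try:
        repo.changelog  # any other lookup now fails
    except error.ProgrammingError:
        pass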