##// END OF EJS Templates
phases: allow registration and boundary advancement with revision sets
Author: Joerg Sonnenberger
r46374:09735cde default
parent child Browse files
Show More
@@ -1,1702 +1,1703 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import open
21 from .pycompat import open
22
22
23 from . import (
23 from . import (
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 mdiff,
26 mdiff,
27 phases,
27 phases,
28 pycompat,
28 pycompat,
29 requirements,
29 requirements,
30 scmutil,
30 scmutil,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import repository
34 from .interfaces import repository
35
35
36 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
36 _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
37 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
37 _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
38 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
38 _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
39
39
40 LFS_REQUIREMENT = b'lfs'
40 LFS_REQUIREMENT = b'lfs'
41
41
42 readexactly = util.readexactly
42 readexactly = util.readexactly
43
43
44
44
def getchunk(stream):
    """Return the next length-prefixed chunk from ``stream`` as bytes.

    The wire format prefixes each chunk with a 4-byte big-endian signed
    length that includes the prefix itself; a length of 0 marks an empty
    (terminating) chunk and yields ``b""``. Lengths 1-4 are impossible
    (they would not even cover the prefix) and negative lengths are
    corrupt, so both abort.
    """
    d = readexactly(stream, 4)
    l = struct.unpack(b">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_(b"invalid chunk length %d") % l)
        return b""
    # Payload length excludes the 4-byte header we already consumed.
    return readexactly(stream, l - 4)
54
54
55
55
def chunkheader(length):
    """Return a changegroup chunk header (bytes) for a payload of ``length``.

    The stored length is big-endian signed and includes the 4-byte header
    itself, matching what getchunk() expects to read back.
    """
    return struct.pack(b">l", length + 4)
59
59
60
60
def closechunk():
    """Return a changegroup chunk header (bytes) for a zero-length chunk.

    A zero length (not 4, i.e. not header-inclusive) is the terminator
    that getchunk() maps to ``b""``.
    """
    return struct.pack(b">l", 0)
64
64
65
65
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path.

    The chunk payload is the path itself, so the header length is the
    path length (chunkheader adds the 4-byte header overhead).
    """
    return chunkheader(len(path)) + path
69
69
70
70
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.

    ``cleanup`` holds the name of a partially-written temporary file; it
    is reset to None only once all chunks were written, so the ``finally``
    block removes the temp file on any failure mid-write.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
            cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
104
104
105
105
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Optional progress callback invoked per non-empty chunk.
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != b'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        """Read the next chunk-length prefix; return the payload length.

        Returns 0 for a terminating (empty) chunk and aborts on a corrupt
        length, mirroring module-level getchunk().
        """
        d = readexactly(self._stream, 4)
        l = struct.unpack(b">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_(b"invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {b'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 has no explicit delta base: it is implied to be the previous
        # node in the stream, or p1 for the first delta of a group.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        """Read one delta chunk; return the parsed 7-tuple or {} at end."""
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    # Re-emit payload in at most 1 MiB slices.
                    next = pos + 2 ** 20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
    ):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            efilesset = set()
            # Nodes from the bundle that already existed locally before
            # clstart; used below for phase boundary advancement. Newly
            # added changesets are tracked as a revision range instead.
            cgnodes = []

            def ondupchangelog(cl, node):
                if cl.rev(node) < clstart:
                    cgnodes.append(node)

            def onchangelog(cl, node):
                efilesset.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in pycompat.iteritems(mfest):
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles
            )

            # making sure the value exists
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # some code use bundle operation for internal purpose. They usually
            # set `ui.quiet` to do this outside of user sight. Size the report
            # of such operation now happens at the end of the transaction, that
            # ui.quiet has not direct effect on the output.
            #
            # To preserve this intend use an inelegant hack, we fail to report
            # the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the "end
            # of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            # Newly added changesets as a revision range (not nodes).
            added = pycompat.xrange(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, [], revs=added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes, revs=added)
                cgnodes = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
499
500
500
501
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        # generaldelta: the delta base is carried explicitly in the header
        # instead of being inferred from prevnode/p1 as in cg1.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
517
518
518
519
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # Revlog flags are part of the cg3 wire header.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                raise error.Abort(_(b"received dir revlog group is empty"))
545
546
546
547
class headerlessfixup(object):
    """File-like wrapper that replays an already-consumed header.

    Bytes are served from the buffered header ``h`` first; once that is
    exhausted, reads fall through to the underlying file object ``fh``.
    """

    def __init__(self, fh, h):
        self._fh = fh
        self._h = h

    def read(self, n):
        if not self._h:
            return readexactly(self._fh, n)
        buffered = self._h[:n]
        self._h = self._h[n:]
        if len(buffered) < n:
            # Header ran out mid-request; top up from the real stream.
            buffered += readexactly(self._fh, n - len(buffered))
        return buffered
561
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks.

    The changegroup wire format only carries deltas, so a captured full
    revision is prefixed with a synthetic diff header telling the
    receiver to replace the base text wholesale.
    """
    if delta.delta is not None:
        # Already encoded as a delta; ship it with no diff prefix.
        data = delta.delta
        prefix = b''
    else:
        data = delta.revision
        if delta.basenode == nullid:
            prefix = mdiff.trivialdiffheader(len(data))
        else:
            prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data
588
588 def _sortnodesellipsis(store, nodes, cl, lookup):
589 def _sortnodesellipsis(store, nodes, cl, lookup):
589 """Sort nodes for changegroup generation."""
590 """Sort nodes for changegroup generation."""
590 # Ellipses serving mode.
591 # Ellipses serving mode.
591 #
592 #
592 # In a perfect world, we'd generate better ellipsis-ified graphs
593 # In a perfect world, we'd generate better ellipsis-ified graphs
593 # for non-changelog revlogs. In practice, we haven't started doing
594 # for non-changelog revlogs. In practice, we haven't started doing
594 # that yet, so the resulting DAGs for the manifestlog and filelogs
595 # that yet, so the resulting DAGs for the manifestlog and filelogs
595 # are actually full of bogus parentage on all the ellipsis
596 # are actually full of bogus parentage on all the ellipsis
596 # nodes. This has the side effect that, while the contents are
597 # nodes. This has the side effect that, while the contents are
597 # correct, the individual DAGs might be completely out of whack in
598 # correct, the individual DAGs might be completely out of whack in
598 # a case like 882681bc3166 and its ancestors (back about 10
599 # a case like 882681bc3166 and its ancestors (back about 10
599 # revisions or so) in the main hg repo.
600 # revisions or so) in the main hg repo.
600 #
601 #
601 # The one invariant we *know* holds is that the new (potentially
602 # The one invariant we *know* holds is that the new (potentially
602 # bogus) DAG shape will be valid if we order the nodes in the
603 # bogus) DAG shape will be valid if we order the nodes in the
603 # order that they're introduced in dramatis personae by the
604 # order that they're introduced in dramatis personae by the
604 # changelog, so what we do is we sort the non-changelog histories
605 # changelog, so what we do is we sort the non-changelog histories
605 # by the order in which they are used by the changelog.
606 # by the order in which they are used by the changelog.
606 key = lambda n: cl.rev(lookup(n))
607 key = lambda n: cl.rev(lookup(n))
607 return sorted(nodes, key=key)
608 return sorted(nodes, key=key)
608
609
609
610
def _resolvenarrowrevisioninfo(
    cl,
    store,
    ischangelog,
    rev,
    linkrev,
    linknode,
    clrevtolocalrev,
    fullclnodes,
    precomputedellipsis,
):
    """Resolve revision metadata for a revision in an ellipsis changegroup.

    Returns a ``(p1node, p2node, linknode)`` tuple, where p1/p2 are node
    ids in ``store`` chosen for the (possibly synthetic) parents of
    ``rev``. The parents are derived from the precomputed ellipsis
    parents of ``linkrev`` on the changelog.
    """
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend(
                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
                )
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    b'unable to resolve parent while packing %r %r'
                    b' for changeset %r' % (store.indexfile, rev, clrev)
                )

        return nullrev

    # Map the (up to two) changelog parent revs recorded for the ellipsis
    # into this store's revision numbers; sorted() gives a stable p1/p2
    # assignment. If the stored revision is a root, parents stay null.
    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        (p1,) = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
699
def deltagroup(
    repo,
    store,
    nodes,
    ischangelog,
    lookup,
    forcedeltaparentprev,
    topic=None,
    ellipses=False,
    clrevtolocalrev=None,
    fullclnodes=None,
    precomputedellipsis=None,
):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    # Pick the order revisions will be emitted in.
    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = b'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = b'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl,
                    store,
                    ischangelog,
                    rev,
                    linkrev,
                    linknode,
                    clrevtolocalrev,
                    fullclnodes,
                    precomputedellipsis,
                )

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(
            topic, unit=_(b'chunks'), total=len(nodes)
        )

    # The devel.bundle.delta config knob can force a specific delta
    # strategy; warn (but continue with the default) on unknown values.
    configtarget = repo.ui.config(b'devel', b'bundle.delta')
    if configtarget not in (b'', b'p1', b'full'):
        msg = _("""config "devel.bundle.delta" as unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == b'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == b'full':
        deltamode = repository.CG_DELTAMODE_FULL

    # The storage layer computes the actual deltas; in ellipsis mode we
    # cannot assume parents are present on the receiving side.
    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode,
    )

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            # Use the linknode (and possibly adjusted parents) recorded
            # during the first pass above.
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
841
841 class cgpacker(object):
842 class cgpacker(object):
842 def __init__(
843 def __init__(
843 self,
844 self,
844 repo,
845 repo,
845 oldmatcher,
846 oldmatcher,
846 matcher,
847 matcher,
847 version,
848 version,
848 builddeltaheader,
849 builddeltaheader,
849 manifestsend,
850 manifestsend,
850 forcedeltaparentprev=False,
851 forcedeltaparentprev=False,
851 bundlecaps=None,
852 bundlecaps=None,
852 ellipses=False,
853 ellipses=False,
853 shallow=False,
854 shallow=False,
854 ellipsisroots=None,
855 ellipsisroots=None,
855 fullnodes=None,
856 fullnodes=None,
856 ):
857 ):
857 """Given a source repo, construct a bundler.
858 """Given a source repo, construct a bundler.
858
859
859 oldmatcher is a matcher that matches on files the client already has.
860 oldmatcher is a matcher that matches on files the client already has.
860 These will not be included in the changegroup.
861 These will not be included in the changegroup.
861
862
862 matcher is a matcher that matches on files to include in the
863 matcher is a matcher that matches on files to include in the
863 changegroup. Used to facilitate sparse changegroups.
864 changegroup. Used to facilitate sparse changegroups.
864
865
865 forcedeltaparentprev indicates whether delta parents must be against
866 forcedeltaparentprev indicates whether delta parents must be against
866 the previous revision in a delta group. This should only be used for
867 the previous revision in a delta group. This should only be used for
867 compatibility with changegroup version 1.
868 compatibility with changegroup version 1.
868
869
869 builddeltaheader is a callable that constructs the header for a group
870 builddeltaheader is a callable that constructs the header for a group
870 delta.
871 delta.
871
872
872 manifestsend is a chunk to send after manifests have been fully emitted.
873 manifestsend is a chunk to send after manifests have been fully emitted.
873
874
874 ellipses indicates whether ellipsis serving mode is enabled.
875 ellipses indicates whether ellipsis serving mode is enabled.
875
876
876 bundlecaps is optional and can be used to specify the set of
877 bundlecaps is optional and can be used to specify the set of
877 capabilities which can be used to build the bundle. While bundlecaps is
878 capabilities which can be used to build the bundle. While bundlecaps is
878 unused in core Mercurial, extensions rely on this feature to communicate
879 unused in core Mercurial, extensions rely on this feature to communicate
879 capabilities to customize the changegroup packer.
880 capabilities to customize the changegroup packer.
880
881
881 shallow indicates whether shallow data might be sent. The packer may
882 shallow indicates whether shallow data might be sent. The packer may
882 need to pack file contents not introduced by the changes being packed.
883 need to pack file contents not introduced by the changes being packed.
883
884
884 fullnodes is the set of changelog nodes which should not be ellipsis
885 fullnodes is the set of changelog nodes which should not be ellipsis
885 nodes. We store this rather than the set of nodes that should be
886 nodes. We store this rather than the set of nodes that should be
886 ellipsis because for very large histories we expect this to be
887 ellipsis because for very large histories we expect this to be
887 significantly smaller.
888 significantly smaller.
888 """
889 """
889 assert oldmatcher
890 assert oldmatcher
890 assert matcher
891 assert matcher
891 self._oldmatcher = oldmatcher
892 self._oldmatcher = oldmatcher
892 self._matcher = matcher
893 self._matcher = matcher
893
894
894 self.version = version
895 self.version = version
895 self._forcedeltaparentprev = forcedeltaparentprev
896 self._forcedeltaparentprev = forcedeltaparentprev
896 self._builddeltaheader = builddeltaheader
897 self._builddeltaheader = builddeltaheader
897 self._manifestsend = manifestsend
898 self._manifestsend = manifestsend
898 self._ellipses = ellipses
899 self._ellipses = ellipses
899
900
900 # Set of capabilities we can use to build the bundle.
901 # Set of capabilities we can use to build the bundle.
901 if bundlecaps is None:
902 if bundlecaps is None:
902 bundlecaps = set()
903 bundlecaps = set()
903 self._bundlecaps = bundlecaps
904 self._bundlecaps = bundlecaps
904 self._isshallow = shallow
905 self._isshallow = shallow
905 self._fullclnodes = fullnodes
906 self._fullclnodes = fullnodes
906
907
907 # Maps ellipsis revs to their roots at the changelog level.
908 # Maps ellipsis revs to their roots at the changelog level.
908 self._precomputedellipsis = ellipsisroots
909 self._precomputedellipsis = ellipsisroots
909
910
910 self._repo = repo
911 self._repo = repo
911
912
912 if self._repo.ui.verbose and not self._repo.ui.debugflag:
913 if self._repo.ui.verbose and not self._repo.ui.debugflag:
913 self._verbosenote = self._repo.ui.note
914 self._verbosenote = self._repo.ui.note
914 else:
915 else:
915 self._verbosenote = lambda s: None
916 self._verbosenote = lambda s: None
916
917
    def generate(
        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
    ):
        """Yield a sequence of changegroup byte chunks.

        Emits the changelog group, then the manifest groups, then one
        group per changed file, each terminated by a close chunk.

        If changelog is False, changelog data won't be added to changegroup
        """

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_(b'uncompressed size of bundle content:\n'))
        size = 0

        # Part 1: changelog chunks (state is populated as the delta
        # stream is consumed, even when changelog data is not emitted).
        clstate, deltas = self._generatechangelog(
            cl, clnodes, generate=changelog
        )
        for delta in deltas:
            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield closechunk()

        self._verbosenote(_(b'%8.i (changelog)\n') % size)

        clrevorder = clstate[b'clrevorder']
        manifests = clstate[b'manifests']
        changedfiles = clstate[b'changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath when the manifest revlog uses generaldelta,
        # the manifest may be walked in the "wrong" order. Without 'clrevorder',
        # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta and is
        # never reordered. To handle this case, we simply take the slowpath,
        # which already has the 'clrevorder' logic. This was also fixed in
        # cc0ff93d0c0c.

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)

        fnodes = {}  # needed file nodes

        # Part 2: manifest chunks (one group per tree, the empty tree name
        # denoting the root manifest).
        size = 0
        it = self.generatemanifests(
            commonrevs,
            clrevorder,
            fastpathlinkrev,
            manifests,
            fnodes,
            source,
            clstate[b'clrevtomanifestrev'],
        )

        for tree, deltas in it:
            if tree:
                # Named (directory) trees only appear in cg3 streams.
                assert self.version == b'03'
                chunk = _fileheader(tree)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_(b'%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [
                (self._repo.manifestlog[n].read(), lr)
                for (n, lr) in pycompat.iteritems(manifests)
            ]

        manifests.clear()
        clrevs = {cl.rev(x) for x in clnodes}

        # Part 3: one chunk group per changed file.
        it = self.generatefiles(
            changedfiles,
            commonrevs,
            source,
            mfdicts,
            fastpathlinkrev,
            fnodes,
            clrevs,
        )

        for path, deltas in it:
            h = _fileheader(path)
            # Note: size is reset per file here (per-file reporting below).
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_(b'%8.i %s\n') % (size, path))

        yield closechunk()

        if clnodes:
            repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
1043
1043 def _generatechangelog(self, cl, nodes, generate=True):
1044 def _generatechangelog(self, cl, nodes, generate=True):
1044 """Generate data for changelog chunks.
1045 """Generate data for changelog chunks.
1045
1046
1046 Returns a 2-tuple of a dict containing state and an iterable of
1047 Returns a 2-tuple of a dict containing state and an iterable of
1047 byte chunks. The state will not be fully populated until the
1048 byte chunks. The state will not be fully populated until the
1048 chunk stream has been fully consumed.
1049 chunk stream has been fully consumed.
1049
1050
1050 if generate is False, the state will be fully populated and no chunk
1051 if generate is False, the state will be fully populated and no chunk
1051 stream will be yielded
1052 stream will be yielded
1052 """
1053 """
1053 clrevorder = {}
1054 clrevorder = {}
1054 manifests = {}
1055 manifests = {}
1055 mfl = self._repo.manifestlog
1056 mfl = self._repo.manifestlog
1056 changedfiles = set()
1057 changedfiles = set()
1057 clrevtomanifestrev = {}
1058 clrevtomanifestrev = {}
1058
1059
1059 state = {
1060 state = {
1060 b'clrevorder': clrevorder,
1061 b'clrevorder': clrevorder,
1061 b'manifests': manifests,
1062 b'manifests': manifests,
1062 b'changedfiles': changedfiles,
1063 b'changedfiles': changedfiles,
1063 b'clrevtomanifestrev': clrevtomanifestrev,
1064 b'clrevtomanifestrev': clrevtomanifestrev,
1064 }
1065 }
1065
1066
1066 if not (generate or self._ellipses):
1067 if not (generate or self._ellipses):
1067 # sort the nodes in storage order
1068 # sort the nodes in storage order
1068 nodes = sorted(nodes, key=cl.rev)
1069 nodes = sorted(nodes, key=cl.rev)
1069 for node in nodes:
1070 for node in nodes:
1070 c = cl.changelogrevision(node)
1071 c = cl.changelogrevision(node)
1071 clrevorder[node] = len(clrevorder)
1072 clrevorder[node] = len(clrevorder)
1072 # record the first changeset introducing this manifest version
1073 # record the first changeset introducing this manifest version
1073 manifests.setdefault(c.manifest, node)
1074 manifests.setdefault(c.manifest, node)
1074 # Record a complete list of potentially-changed files in
1075 # Record a complete list of potentially-changed files in
1075 # this manifest.
1076 # this manifest.
1076 changedfiles.update(c.files)
1077 changedfiles.update(c.files)
1077
1078
1078 return state, ()
1079 return state, ()
1079
1080
1080 # Callback for the changelog, used to collect changed files and
1081 # Callback for the changelog, used to collect changed files and
1081 # manifest nodes.
1082 # manifest nodes.
1082 # Returns the linkrev node (identity in the changelog case).
1083 # Returns the linkrev node (identity in the changelog case).
1083 def lookupcl(x):
1084 def lookupcl(x):
1084 c = cl.changelogrevision(x)
1085 c = cl.changelogrevision(x)
1085 clrevorder[x] = len(clrevorder)
1086 clrevorder[x] = len(clrevorder)
1086
1087
1087 if self._ellipses:
1088 if self._ellipses:
1088 # Only update manifests if x is going to be sent. Otherwise we
1089 # Only update manifests if x is going to be sent. Otherwise we
1089 # end up with bogus linkrevs specified for manifests and
1090 # end up with bogus linkrevs specified for manifests and
1090 # we skip some manifest nodes that we should otherwise
1091 # we skip some manifest nodes that we should otherwise
1091 # have sent.
1092 # have sent.
1092 if (
1093 if (
1093 x in self._fullclnodes
1094 x in self._fullclnodes
1094 or cl.rev(x) in self._precomputedellipsis
1095 or cl.rev(x) in self._precomputedellipsis
1095 ):
1096 ):
1096
1097
1097 manifestnode = c.manifest
1098 manifestnode = c.manifest
1098 # Record the first changeset introducing this manifest
1099 # Record the first changeset introducing this manifest
1099 # version.
1100 # version.
1100 manifests.setdefault(manifestnode, x)
1101 manifests.setdefault(manifestnode, x)
1101 # Set this narrow-specific dict so we have the lowest
1102 # Set this narrow-specific dict so we have the lowest
1102 # manifest revnum to look up for this cl revnum. (Part of
1103 # manifest revnum to look up for this cl revnum. (Part of
1103 # mapping changelog ellipsis parents to manifest ellipsis
1104 # mapping changelog ellipsis parents to manifest ellipsis
1104 # parents)
1105 # parents)
1105 clrevtomanifestrev.setdefault(
1106 clrevtomanifestrev.setdefault(
1106 cl.rev(x), mfl.rev(manifestnode)
1107 cl.rev(x), mfl.rev(manifestnode)
1107 )
1108 )
1108 # We can't trust the changed files list in the changeset if the
1109 # We can't trust the changed files list in the changeset if the
1109 # client requested a shallow clone.
1110 # client requested a shallow clone.
1110 if self._isshallow:
1111 if self._isshallow:
1111 changedfiles.update(mfl[c.manifest].read().keys())
1112 changedfiles.update(mfl[c.manifest].read().keys())
1112 else:
1113 else:
1113 changedfiles.update(c.files)
1114 changedfiles.update(c.files)
1114 else:
1115 else:
1115 # record the first changeset introducing this manifest version
1116 # record the first changeset introducing this manifest version
1116 manifests.setdefault(c.manifest, x)
1117 manifests.setdefault(c.manifest, x)
1117 # Record a complete list of potentially-changed files in
1118 # Record a complete list of potentially-changed files in
1118 # this manifest.
1119 # this manifest.
1119 changedfiles.update(c.files)
1120 changedfiles.update(c.files)
1120
1121
1121 return x
1122 return x
1122
1123
1123 gen = deltagroup(
1124 gen = deltagroup(
1124 self._repo,
1125 self._repo,
1125 cl,
1126 cl,
1126 nodes,
1127 nodes,
1127 True,
1128 True,
1128 lookupcl,
1129 lookupcl,
1129 self._forcedeltaparentprev,
1130 self._forcedeltaparentprev,
1130 ellipses=self._ellipses,
1131 ellipses=self._ellipses,
1131 topic=_(b'changesets'),
1132 topic=_(b'changesets'),
1132 clrevtolocalrev={},
1133 clrevtolocalrev={},
1133 fullclnodes=self._fullclnodes,
1134 fullclnodes=self._fullclnodes,
1134 precomputedellipsis=self._precomputedellipsis,
1135 precomputedellipsis=self._precomputedellipsis,
1135 )
1136 )
1136
1137
1137 return state, gen
1138 return state, gen
1138
1139
1139 def generatemanifests(
1140 def generatemanifests(
1140 self,
1141 self,
1141 commonrevs,
1142 commonrevs,
1142 clrevorder,
1143 clrevorder,
1143 fastpathlinkrev,
1144 fastpathlinkrev,
1144 manifests,
1145 manifests,
1145 fnodes,
1146 fnodes,
1146 source,
1147 source,
1147 clrevtolocalrev,
1148 clrevtolocalrev,
1148 ):
1149 ):
1149 """Returns an iterator of changegroup chunks containing manifests.
1150 """Returns an iterator of changegroup chunks containing manifests.
1150
1151
1151 `source` is unused here, but is used by extensions like remotefilelog to
1152 `source` is unused here, but is used by extensions like remotefilelog to
1152 change what is sent based in pulls vs pushes, etc.
1153 change what is sent based in pulls vs pushes, etc.
1153 """
1154 """
1154 repo = self._repo
1155 repo = self._repo
1155 mfl = repo.manifestlog
1156 mfl = repo.manifestlog
1156 tmfnodes = {b'': manifests}
1157 tmfnodes = {b'': manifests}
1157
1158
1158 # Callback for the manifest, used to collect linkrevs for filelog
1159 # Callback for the manifest, used to collect linkrevs for filelog
1159 # revisions.
1160 # revisions.
1160 # Returns the linkrev node (collected in lookupcl).
1161 # Returns the linkrev node (collected in lookupcl).
1161 def makelookupmflinknode(tree, nodes):
1162 def makelookupmflinknode(tree, nodes):
1162 if fastpathlinkrev:
1163 if fastpathlinkrev:
1163 assert not tree
1164 assert not tree
1164 return (
1165 return (
1165 manifests.__getitem__
1166 manifests.__getitem__
1166 ) # pytype: disable=unsupported-operands
1167 ) # pytype: disable=unsupported-operands
1167
1168
1168 def lookupmflinknode(x):
1169 def lookupmflinknode(x):
1169 """Callback for looking up the linknode for manifests.
1170 """Callback for looking up the linknode for manifests.
1170
1171
1171 Returns the linkrev node for the specified manifest.
1172 Returns the linkrev node for the specified manifest.
1172
1173
1173 SIDE EFFECT:
1174 SIDE EFFECT:
1174
1175
1175 1) fclnodes gets populated with the list of relevant
1176 1) fclnodes gets populated with the list of relevant
1176 file nodes if we're not using fastpathlinkrev
1177 file nodes if we're not using fastpathlinkrev
1177 2) When treemanifests are in use, collects treemanifest nodes
1178 2) When treemanifests are in use, collects treemanifest nodes
1178 to send
1179 to send
1179
1180
1180 Note that this means manifests must be completely sent to
1181 Note that this means manifests must be completely sent to
1181 the client before you can trust the list of files and
1182 the client before you can trust the list of files and
1182 treemanifests to send.
1183 treemanifests to send.
1183 """
1184 """
1184 clnode = nodes[x]
1185 clnode = nodes[x]
1185 mdata = mfl.get(tree, x).readfast(shallow=True)
1186 mdata = mfl.get(tree, x).readfast(shallow=True)
1186 for p, n, fl in mdata.iterentries():
1187 for p, n, fl in mdata.iterentries():
1187 if fl == b't': # subdirectory manifest
1188 if fl == b't': # subdirectory manifest
1188 subtree = tree + p + b'/'
1189 subtree = tree + p + b'/'
1189 tmfclnodes = tmfnodes.setdefault(subtree, {})
1190 tmfclnodes = tmfnodes.setdefault(subtree, {})
1190 tmfclnode = tmfclnodes.setdefault(n, clnode)
1191 tmfclnode = tmfclnodes.setdefault(n, clnode)
1191 if clrevorder[clnode] < clrevorder[tmfclnode]:
1192 if clrevorder[clnode] < clrevorder[tmfclnode]:
1192 tmfclnodes[n] = clnode
1193 tmfclnodes[n] = clnode
1193 else:
1194 else:
1194 f = tree + p
1195 f = tree + p
1195 fclnodes = fnodes.setdefault(f, {})
1196 fclnodes = fnodes.setdefault(f, {})
1196 fclnode = fclnodes.setdefault(n, clnode)
1197 fclnode = fclnodes.setdefault(n, clnode)
1197 if clrevorder[clnode] < clrevorder[fclnode]:
1198 if clrevorder[clnode] < clrevorder[fclnode]:
1198 fclnodes[n] = clnode
1199 fclnodes[n] = clnode
1199 return clnode
1200 return clnode
1200
1201
1201 return lookupmflinknode
1202 return lookupmflinknode
1202
1203
1203 while tmfnodes:
1204 while tmfnodes:
1204 tree, nodes = tmfnodes.popitem()
1205 tree, nodes = tmfnodes.popitem()
1205
1206
1206 should_visit = self._matcher.visitdir(tree[:-1])
1207 should_visit = self._matcher.visitdir(tree[:-1])
1207 if tree and not should_visit:
1208 if tree and not should_visit:
1208 continue
1209 continue
1209
1210
1210 store = mfl.getstorage(tree)
1211 store = mfl.getstorage(tree)
1211
1212
1212 if not should_visit:
1213 if not should_visit:
1213 # No nodes to send because this directory is out of
1214 # No nodes to send because this directory is out of
1214 # the client's view of the repository (probably
1215 # the client's view of the repository (probably
1215 # because of narrow clones). Do this even for the root
1216 # because of narrow clones). Do this even for the root
1216 # directory (tree=='')
1217 # directory (tree=='')
1217 prunednodes = []
1218 prunednodes = []
1218 else:
1219 else:
1219 # Avoid sending any manifest nodes we can prove the
1220 # Avoid sending any manifest nodes we can prove the
1220 # client already has by checking linkrevs. See the
1221 # client already has by checking linkrevs. See the
1221 # related comment in generatefiles().
1222 # related comment in generatefiles().
1222 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1223 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1223
1224
1224 if tree and not prunednodes:
1225 if tree and not prunednodes:
1225 continue
1226 continue
1226
1227
1227 lookupfn = makelookupmflinknode(tree, nodes)
1228 lookupfn = makelookupmflinknode(tree, nodes)
1228
1229
1229 deltas = deltagroup(
1230 deltas = deltagroup(
1230 self._repo,
1231 self._repo,
1231 store,
1232 store,
1232 prunednodes,
1233 prunednodes,
1233 False,
1234 False,
1234 lookupfn,
1235 lookupfn,
1235 self._forcedeltaparentprev,
1236 self._forcedeltaparentprev,
1236 ellipses=self._ellipses,
1237 ellipses=self._ellipses,
1237 topic=_(b'manifests'),
1238 topic=_(b'manifests'),
1238 clrevtolocalrev=clrevtolocalrev,
1239 clrevtolocalrev=clrevtolocalrev,
1239 fullclnodes=self._fullclnodes,
1240 fullclnodes=self._fullclnodes,
1240 precomputedellipsis=self._precomputedellipsis,
1241 precomputedellipsis=self._precomputedellipsis,
1241 )
1242 )
1242
1243
1243 if not self._oldmatcher.visitdir(store.tree[:-1]):
1244 if not self._oldmatcher.visitdir(store.tree[:-1]):
1244 yield tree, deltas
1245 yield tree, deltas
1245 else:
1246 else:
1246 # 'deltas' is a generator and we need to consume it even if
1247 # 'deltas' is a generator and we need to consume it even if
1247 # we are not going to send it because a side-effect is that
1248 # we are not going to send it because a side-effect is that
1248 # it updates tmdnodes (via lookupfn)
1249 # it updates tmdnodes (via lookupfn)
1249 for d in deltas:
1250 for d in deltas:
1250 pass
1251 pass
1251 if not tree:
1252 if not tree:
1252 yield tree, []
1253 yield tree, []
1253
1254
1254 def _prunemanifests(self, store, nodes, commonrevs):
1255 def _prunemanifests(self, store, nodes, commonrevs):
1255 if not self._ellipses:
1256 if not self._ellipses:
1256 # In non-ellipses case and large repositories, it is better to
1257 # In non-ellipses case and large repositories, it is better to
1257 # prevent calling of store.rev and store.linkrev on a lot of
1258 # prevent calling of store.rev and store.linkrev on a lot of
1258 # nodes as compared to sending some extra data
1259 # nodes as compared to sending some extra data
1259 return nodes.copy()
1260 return nodes.copy()
1260 # This is split out as a separate method to allow filtering
1261 # This is split out as a separate method to allow filtering
1261 # commonrevs in extension code.
1262 # commonrevs in extension code.
1262 #
1263 #
1263 # TODO(augie): this shouldn't be required, instead we should
1264 # TODO(augie): this shouldn't be required, instead we should
1264 # make filtering of revisions to send delegated to the store
1265 # make filtering of revisions to send delegated to the store
1265 # layer.
1266 # layer.
1266 frev, flr = store.rev, store.linkrev
1267 frev, flr = store.rev, store.linkrev
1267 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1268 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1268
1269
1269 # The 'source' parameter is useful for extensions
1270 # The 'source' parameter is useful for extensions
1270 def generatefiles(
1271 def generatefiles(
1271 self,
1272 self,
1272 changedfiles,
1273 changedfiles,
1273 commonrevs,
1274 commonrevs,
1274 source,
1275 source,
1275 mfdicts,
1276 mfdicts,
1276 fastpathlinkrev,
1277 fastpathlinkrev,
1277 fnodes,
1278 fnodes,
1278 clrevs,
1279 clrevs,
1279 ):
1280 ):
1280 changedfiles = [
1281 changedfiles = [
1281 f
1282 f
1282 for f in changedfiles
1283 for f in changedfiles
1283 if self._matcher(f) and not self._oldmatcher(f)
1284 if self._matcher(f) and not self._oldmatcher(f)
1284 ]
1285 ]
1285
1286
1286 if not fastpathlinkrev:
1287 if not fastpathlinkrev:
1287
1288
1288 def normallinknodes(unused, fname):
1289 def normallinknodes(unused, fname):
1289 return fnodes.get(fname, {})
1290 return fnodes.get(fname, {})
1290
1291
1291 else:
1292 else:
1292 cln = self._repo.changelog.node
1293 cln = self._repo.changelog.node
1293
1294
1294 def normallinknodes(store, fname):
1295 def normallinknodes(store, fname):
1295 flinkrev = store.linkrev
1296 flinkrev = store.linkrev
1296 fnode = store.node
1297 fnode = store.node
1297 revs = ((r, flinkrev(r)) for r in store)
1298 revs = ((r, flinkrev(r)) for r in store)
1298 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1299 return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}
1299
1300
1300 clrevtolocalrev = {}
1301 clrevtolocalrev = {}
1301
1302
1302 if self._isshallow:
1303 if self._isshallow:
1303 # In a shallow clone, the linknodes callback needs to also include
1304 # In a shallow clone, the linknodes callback needs to also include
1304 # those file nodes that are in the manifests we sent but weren't
1305 # those file nodes that are in the manifests we sent but weren't
1305 # introduced by those manifests.
1306 # introduced by those manifests.
1306 commonctxs = [self._repo[c] for c in commonrevs]
1307 commonctxs = [self._repo[c] for c in commonrevs]
1307 clrev = self._repo.changelog.rev
1308 clrev = self._repo.changelog.rev
1308
1309
1309 def linknodes(flog, fname):
1310 def linknodes(flog, fname):
1310 for c in commonctxs:
1311 for c in commonctxs:
1311 try:
1312 try:
1312 fnode = c.filenode(fname)
1313 fnode = c.filenode(fname)
1313 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1314 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1314 except error.ManifestLookupError:
1315 except error.ManifestLookupError:
1315 pass
1316 pass
1316 links = normallinknodes(flog, fname)
1317 links = normallinknodes(flog, fname)
1317 if len(links) != len(mfdicts):
1318 if len(links) != len(mfdicts):
1318 for mf, lr in mfdicts:
1319 for mf, lr in mfdicts:
1319 fnode = mf.get(fname, None)
1320 fnode = mf.get(fname, None)
1320 if fnode in links:
1321 if fnode in links:
1321 links[fnode] = min(links[fnode], lr, key=clrev)
1322 links[fnode] = min(links[fnode], lr, key=clrev)
1322 elif fnode:
1323 elif fnode:
1323 links[fnode] = lr
1324 links[fnode] = lr
1324 return links
1325 return links
1325
1326
1326 else:
1327 else:
1327 linknodes = normallinknodes
1328 linknodes = normallinknodes
1328
1329
1329 repo = self._repo
1330 repo = self._repo
1330 progress = repo.ui.makeprogress(
1331 progress = repo.ui.makeprogress(
1331 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1332 _(b'files'), unit=_(b'files'), total=len(changedfiles)
1332 )
1333 )
1333 for i, fname in enumerate(sorted(changedfiles)):
1334 for i, fname in enumerate(sorted(changedfiles)):
1334 filerevlog = repo.file(fname)
1335 filerevlog = repo.file(fname)
1335 if not filerevlog:
1336 if not filerevlog:
1336 raise error.Abort(
1337 raise error.Abort(
1337 _(b"empty or missing file data for %s") % fname
1338 _(b"empty or missing file data for %s") % fname
1338 )
1339 )
1339
1340
1340 clrevtolocalrev.clear()
1341 clrevtolocalrev.clear()
1341
1342
1342 linkrevnodes = linknodes(filerevlog, fname)
1343 linkrevnodes = linknodes(filerevlog, fname)
1343 # Lookup for filenodes, we collected the linkrev nodes above in the
1344 # Lookup for filenodes, we collected the linkrev nodes above in the
1344 # fastpath case and with lookupmf in the slowpath case.
1345 # fastpath case and with lookupmf in the slowpath case.
1345 def lookupfilelog(x):
1346 def lookupfilelog(x):
1346 return linkrevnodes[x]
1347 return linkrevnodes[x]
1347
1348
1348 frev, flr = filerevlog.rev, filerevlog.linkrev
1349 frev, flr = filerevlog.rev, filerevlog.linkrev
1349 # Skip sending any filenode we know the client already
1350 # Skip sending any filenode we know the client already
1350 # has. This avoids over-sending files relatively
1351 # has. This avoids over-sending files relatively
1351 # inexpensively, so it's not a problem if we under-filter
1352 # inexpensively, so it's not a problem if we under-filter
1352 # here.
1353 # here.
1353 filenodes = [
1354 filenodes = [
1354 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1355 n for n in linkrevnodes if flr(frev(n)) not in commonrevs
1355 ]
1356 ]
1356
1357
1357 if not filenodes:
1358 if not filenodes:
1358 continue
1359 continue
1359
1360
1360 progress.update(i + 1, item=fname)
1361 progress.update(i + 1, item=fname)
1361
1362
1362 deltas = deltagroup(
1363 deltas = deltagroup(
1363 self._repo,
1364 self._repo,
1364 filerevlog,
1365 filerevlog,
1365 filenodes,
1366 filenodes,
1366 False,
1367 False,
1367 lookupfilelog,
1368 lookupfilelog,
1368 self._forcedeltaparentprev,
1369 self._forcedeltaparentprev,
1369 ellipses=self._ellipses,
1370 ellipses=self._ellipses,
1370 clrevtolocalrev=clrevtolocalrev,
1371 clrevtolocalrev=clrevtolocalrev,
1371 fullclnodes=self._fullclnodes,
1372 fullclnodes=self._fullclnodes,
1372 precomputedellipsis=self._precomputedellipsis,
1373 precomputedellipsis=self._precomputedellipsis,
1373 )
1374 )
1374
1375
1375 yield fname, deltas
1376 yield fname, deltas
1376
1377
1377 progress.complete()
1378 progress.complete()
1378
1379
1379
1380
def _makecg1packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Build a cgpacker producing version 01 changegroups."""

    # Version 01 delta headers carry the node, both parents and the
    # linknode (no explicit delta base, hence forcedeltaparentprev).
    def builddeltaheader(d):
        return _CHANGEGROUPV1_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.linknode
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'01',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        forcedeltaparentprev=True,
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1408
1409
1409
1410
def _makecg2packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Build a cgpacker producing version 02 changegroups."""

    # Version 02 delta headers add an explicit delta base node.
    def builddeltaheader(d):
        return _CHANGEGROUPV2_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'02',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1437
1438
1438
1439
def _makecg3packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Build a cgpacker producing version 03 changegroups."""

    # Version 03 delta headers additionally carry revlog flags.
    def builddeltaheader(d):
        return _CHANGEGROUPV3_DELTA_HEADER.pack(
            d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
        )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'03',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1466
1467
1467
1468
# Maps changegroup version identifier -> (packer factory, unpacker class).
_packermap = {
    b'01': (_makecg1packer, cg1unpacker),
    # cg2 adds support for exchanging generaldelta
    b'02': (_makecg2packer, cg2unpacker),
    # cg3 adds support for exchanging revlog flags and treemanifests
    b'03': (_makecg3packer, cg3unpacker),
}
1475
1476
1476
1477
def allsupportedversions(repo):
    """Return the set of changegroup versions supported for this repo."""
    versions = set(_packermap.keys())
    needv03 = False
    if (
        repo.ui.configbool(b'experimental', b'changegroup3')
        or repo.ui.configbool(b'experimental', b'treemanifest')
        or scmutil.istreemanifest(repo)
    ):
        # we keep version 03 because we need it to exchange treemanifest
        # data
        #
        # we also keep versions 01 and 02, because it is possible for a
        # repo to contain both normal and tree manifests at the same
        # time. so using an older version to pull data is viable
        #
        # (or even to push a subset of history)
        needv03 = True
    if b'exp-sidedata-flag' in repo.requirements:
        needv03 = True
        # don't attempt to use 01/02 until we do sidedata cleaning
        versions.discard(b'01')
        versions.discard(b'02')
    if not needv03:
        versions.discard(b'03')
    return versions
1501
1502
1502
1503
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return the changegroup versions this repo can receive."""
    return allsupportedversions(repo)
1506
1507
1507
1508
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo can produce."""
    versions = allsupportedversions(repo)
    if scmutil.istreemanifest(repo):
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest
        # on the fly. Since tree manifests are hashed differently, all of
        # history would have to be converted. Instead, we simply don't even
        # pretend to support versions 01 and 02.
        versions -= {b'01', b'02'}
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions -= {b'01', b'02'}
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions -= {b'01', b'02'}

    return versions
1531
1532
1532
1533
def localversion(repo):
    """Return the best version for locally-consumed bundles.

    Used for bundles such as those from strip and shelve, and temporary
    bundles, where the newest producible format is always acceptable.
    """
    return max(supportedoutgoingversions(repo))
1537
1538
1538
1539
def safeversion(repo):
    """Return the smallest version all clients of this repo should support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so 01 can be dropped in that case.
    """
    versions = supportedoutgoingversions(repo)
    if b'generaldelta' in repo.requirements:
        versions.discard(b'01')
    assert versions
    return min(versions)
1548
1549
1549
1550
def getbundler(
    version,
    repo,
    bundlecaps=None,
    oldmatcher=None,
    matcher=None,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    """Construct the packer registered for ``version``.

    Raises ProgrammingError for unsupported matcher/version combinations
    and Abort when ellipsis nodes are requested with a pre-cg3 version.
    """
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    if version == b'01' and not matcher.always():
        raise error.ProgrammingError(
            b'version 01 changegroups do not support sparse file matchers'
        )

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _(
                b'ellipsis nodes require at least cg3 on client and server, '
                b'but negotiated version %s'
            )
            % version
        )

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    makepacker = _packermap[version][0]
    return makepacker(
        repo,
        oldmatcher,
        matcher,
        bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )
1597
1598
1598
1599
1599 def getunbundler(version, fh, alg, extras=None):
1600 def getunbundler(version, fh, alg, extras=None):
1600 return _packermap[version][1](fh, alg, extras=extras)
1601 return _packermap[version][1](fh, alg, extras=extras)
1601
1602
1602
1603
1603 def _changegroupinfo(repo, nodes, source):
1604 def _changegroupinfo(repo, nodes, source):
1604 if repo.ui.verbose or source == b'bundle':
1605 if repo.ui.verbose or source == b'bundle':
1605 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1606 repo.ui.status(_(b"%d changesets found\n") % len(nodes))
1606 if repo.ui.debugflag:
1607 if repo.ui.debugflag:
1607 repo.ui.debug(b"list of changesets:\n")
1608 repo.ui.debug(b"list of changesets:\n")
1608 for node in nodes:
1609 for node in nodes:
1609 repo.ui.debug(b"%s\n" % hex(node))
1610 repo.ui.debug(b"%s\n" % hex(node))
1610
1611
1611
1612
1612 def makechangegroup(
1613 def makechangegroup(
1613 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1614 repo, outgoing, version, source, fastpath=False, bundlecaps=None
1614 ):
1615 ):
1615 cgstream = makestream(
1616 cgstream = makestream(
1616 repo,
1617 repo,
1617 outgoing,
1618 outgoing,
1618 version,
1619 version,
1619 source,
1620 source,
1620 fastpath=fastpath,
1621 fastpath=fastpath,
1621 bundlecaps=bundlecaps,
1622 bundlecaps=bundlecaps,
1622 )
1623 )
1623 return getunbundler(
1624 return getunbundler(
1624 version,
1625 version,
1625 util.chunkbuffer(cgstream),
1626 util.chunkbuffer(cgstream),
1626 None,
1627 None,
1627 {b'clcount': len(outgoing.missing)},
1628 {b'clcount': len(outgoing.missing)},
1628 )
1629 )
1629
1630
1630
1631
1631 def makestream(
1632 def makestream(
1632 repo,
1633 repo,
1633 outgoing,
1634 outgoing,
1634 version,
1635 version,
1635 source,
1636 source,
1636 fastpath=False,
1637 fastpath=False,
1637 bundlecaps=None,
1638 bundlecaps=None,
1638 matcher=None,
1639 matcher=None,
1639 ):
1640 ):
1640 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1641 bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)
1641
1642
1642 repo = repo.unfiltered()
1643 repo = repo.unfiltered()
1643 commonrevs = outgoing.common
1644 commonrevs = outgoing.common
1644 csets = outgoing.missing
1645 csets = outgoing.missing
1645 heads = outgoing.ancestorsof
1646 heads = outgoing.ancestorsof
1646 # We go through the fast path if we get told to, or if all (unfiltered
1647 # We go through the fast path if we get told to, or if all (unfiltered
1647 # heads have been requested (since we then know there all linkrevs will
1648 # heads have been requested (since we then know there all linkrevs will
1648 # be pulled by the client).
1649 # be pulled by the client).
1649 heads.sort()
1650 heads.sort()
1650 fastpathlinkrev = fastpath or (
1651 fastpathlinkrev = fastpath or (
1651 repo.filtername is None and heads == sorted(repo.heads())
1652 repo.filtername is None and heads == sorted(repo.heads())
1652 )
1653 )
1653
1654
1654 repo.hook(b'preoutgoing', throw=True, source=source)
1655 repo.hook(b'preoutgoing', throw=True, source=source)
1655 _changegroupinfo(repo, csets, source)
1656 _changegroupinfo(repo, csets, source)
1656 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1657 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1657
1658
1658
1659
1659 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1660 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1660 revisions = 0
1661 revisions = 0
1661 files = 0
1662 files = 0
1662 progress = repo.ui.makeprogress(
1663 progress = repo.ui.makeprogress(
1663 _(b'files'), unit=_(b'files'), total=expectedfiles
1664 _(b'files'), unit=_(b'files'), total=expectedfiles
1664 )
1665 )
1665 for chunkdata in iter(source.filelogheader, {}):
1666 for chunkdata in iter(source.filelogheader, {}):
1666 files += 1
1667 files += 1
1667 f = chunkdata[b"filename"]
1668 f = chunkdata[b"filename"]
1668 repo.ui.debug(b"adding %s revisions\n" % f)
1669 repo.ui.debug(b"adding %s revisions\n" % f)
1669 progress.increment()
1670 progress.increment()
1670 fl = repo.file(f)
1671 fl = repo.file(f)
1671 o = len(fl)
1672 o = len(fl)
1672 try:
1673 try:
1673 deltas = source.deltaiter()
1674 deltas = source.deltaiter()
1674 if not fl.addgroup(deltas, revmap, trp):
1675 if not fl.addgroup(deltas, revmap, trp):
1675 raise error.Abort(_(b"received file revlog group is empty"))
1676 raise error.Abort(_(b"received file revlog group is empty"))
1676 except error.CensoredBaseError as e:
1677 except error.CensoredBaseError as e:
1677 raise error.Abort(_(b"received delta base is censored: %s") % e)
1678 raise error.Abort(_(b"received delta base is censored: %s") % e)
1678 revisions += len(fl) - o
1679 revisions += len(fl) - o
1679 if f in needfiles:
1680 if f in needfiles:
1680 needs = needfiles[f]
1681 needs = needfiles[f]
1681 for new in pycompat.xrange(o, len(fl)):
1682 for new in pycompat.xrange(o, len(fl)):
1682 n = fl.node(new)
1683 n = fl.node(new)
1683 if n in needs:
1684 if n in needs:
1684 needs.remove(n)
1685 needs.remove(n)
1685 else:
1686 else:
1686 raise error.Abort(_(b"received spurious file revlog entry"))
1687 raise error.Abort(_(b"received spurious file revlog entry"))
1687 if not needs:
1688 if not needs:
1688 del needfiles[f]
1689 del needfiles[f]
1689 progress.complete()
1690 progress.complete()
1690
1691
1691 for f, needs in pycompat.iteritems(needfiles):
1692 for f, needs in pycompat.iteritems(needfiles):
1692 fl = repo.file(f)
1693 fl = repo.file(f)
1693 for n in needs:
1694 for n in needs:
1694 try:
1695 try:
1695 fl.rev(n)
1696 fl.rev(n)
1696 except error.LookupError:
1697 except error.LookupError:
1697 raise error.Abort(
1698 raise error.Abort(
1698 _(b'missing file data for %s:%s - run hg verify')
1699 _(b'missing file data for %s:%s - run hg verify')
1699 % (f, hex(n))
1700 % (f, hex(n))
1700 )
1701 )
1701
1702
1702 return revisions, files
1703 return revisions, files
@@ -1,932 +1,943 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described
21 manipulated and communicated. The details of each phase is described
22 below, here we describe the properties they have in common.
22 below, here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset can not be in a lower phase than its parents.
30 child changeset can not be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A small list of fact/rules define the exchange of phase:
49 A small list of fact/rules define the exchange of phase:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old client behave as a publishing server with draft only content
97 Note: old client behave as a publishing server with draft only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103 from __future__ import absolute_import
103 from __future__ import absolute_import
104
104
105 import errno
105 import errno
106 import struct
106 import struct
107
107
108 from .i18n import _
108 from .i18n import _
109 from .node import (
109 from .node import (
110 bin,
110 bin,
111 hex,
111 hex,
112 nullid,
112 nullid,
113 nullrev,
113 nullrev,
114 short,
114 short,
115 wdirrev,
115 wdirrev,
116 )
116 )
117 from .pycompat import (
117 from .pycompat import (
118 getattr,
118 getattr,
119 setattr,
119 setattr,
120 )
120 )
121 from . import (
121 from . import (
122 error,
122 error,
123 pycompat,
123 pycompat,
124 requirements,
124 requirements,
125 smartset,
125 smartset,
126 txnutil,
126 txnutil,
127 util,
127 util,
128 )
128 )
129
129
130 _fphasesentry = struct.Struct(b'>i20s')
130 _fphasesentry = struct.Struct(b'>i20s')
131
131
132 # record phase index
132 # record phase index
133 public, draft, secret = range(3)
133 public, draft, secret = range(3)
134 archived = 32 # non-continuous for compatibility
134 archived = 32 # non-continuous for compatibility
135 internal = 96 # non-continuous for compatibility
135 internal = 96 # non-continuous for compatibility
136 allphases = (public, draft, secret, archived, internal)
136 allphases = (public, draft, secret, archived, internal)
137 trackedphases = (draft, secret, archived, internal)
137 trackedphases = (draft, secret, archived, internal)
138 # record phase names
138 # record phase names
139 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
139 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
140 phasenames = dict(enumerate(cmdphasenames))
140 phasenames = dict(enumerate(cmdphasenames))
141 phasenames[archived] = b'archived'
141 phasenames[archived] = b'archived'
142 phasenames[internal] = b'internal'
142 phasenames[internal] = b'internal'
143 # map phase name to phase number
143 # map phase name to phase number
144 phasenumber = {name: phase for phase, name in phasenames.items()}
144 phasenumber = {name: phase for phase, name in phasenames.items()}
145 # like phasenumber, but also include maps for the numeric and binary
145 # like phasenumber, but also include maps for the numeric and binary
146 # phase number to the phase number
146 # phase number to the phase number
147 phasenumber2 = phasenumber.copy()
147 phasenumber2 = phasenumber.copy()
148 phasenumber2.update({phase: phase for phase in phasenames})
148 phasenumber2.update({phase: phase for phase in phasenames})
149 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
149 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
150 # record phase property
150 # record phase property
151 mutablephases = (draft, secret, archived, internal)
151 mutablephases = (draft, secret, archived, internal)
152 remotehiddenphases = (secret, archived, internal)
152 remotehiddenphases = (secret, archived, internal)
153 localhiddenphases = (internal, archived)
153 localhiddenphases = (internal, archived)
154
154
155
155
156 def supportinternal(repo):
156 def supportinternal(repo):
157 """True if the internal phase can be used on a repository"""
157 """True if the internal phase can be used on a repository"""
158 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
158 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
159
159
160
160
161 def _readroots(repo, phasedefaults=None):
161 def _readroots(repo, phasedefaults=None):
162 """Read phase roots from disk
162 """Read phase roots from disk
163
163
164 phasedefaults is a list of fn(repo, roots) callable, which are
164 phasedefaults is a list of fn(repo, roots) callable, which are
165 executed if the phase roots file does not exist. When phases are
165 executed if the phase roots file does not exist. When phases are
166 being initialized on an existing repository, this could be used to
166 being initialized on an existing repository, this could be used to
167 set selected changesets phase to something else than public.
167 set selected changesets phase to something else than public.
168
168
169 Return (roots, dirty) where dirty is true if roots differ from
169 Return (roots, dirty) where dirty is true if roots differ from
170 what is being stored.
170 what is being stored.
171 """
171 """
172 repo = repo.unfiltered()
172 repo = repo.unfiltered()
173 dirty = False
173 dirty = False
174 roots = {i: set() for i in allphases}
174 roots = {i: set() for i in allphases}
175 try:
175 try:
176 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
176 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
177 try:
177 try:
178 for line in f:
178 for line in f:
179 phase, nh = line.split()
179 phase, nh = line.split()
180 roots[int(phase)].add(bin(nh))
180 roots[int(phase)].add(bin(nh))
181 finally:
181 finally:
182 f.close()
182 f.close()
183 except IOError as inst:
183 except IOError as inst:
184 if inst.errno != errno.ENOENT:
184 if inst.errno != errno.ENOENT:
185 raise
185 raise
186 if phasedefaults:
186 if phasedefaults:
187 for f in phasedefaults:
187 for f in phasedefaults:
188 roots = f(repo, roots)
188 roots = f(repo, roots)
189 dirty = True
189 dirty = True
190 return roots, dirty
190 return roots, dirty
191
191
192
192
193 def binaryencode(phasemapping):
193 def binaryencode(phasemapping):
194 """encode a 'phase -> nodes' mapping into a binary stream
194 """encode a 'phase -> nodes' mapping into a binary stream
195
195
196 The revision lists are encoded as (phase, root) pairs.
196 The revision lists are encoded as (phase, root) pairs.
197 """
197 """
198 binarydata = []
198 binarydata = []
199 for phase, nodes in pycompat.iteritems(phasemapping):
199 for phase, nodes in pycompat.iteritems(phasemapping):
200 for head in nodes:
200 for head in nodes:
201 binarydata.append(_fphasesentry.pack(phase, head))
201 binarydata.append(_fphasesentry.pack(phase, head))
202 return b''.join(binarydata)
202 return b''.join(binarydata)
203
203
204
204
205 def binarydecode(stream):
205 def binarydecode(stream):
206 """decode a binary stream into a 'phase -> nodes' mapping
206 """decode a binary stream into a 'phase -> nodes' mapping
207
207
208 The (phase, root) pairs are turned back into a dictionary with
208 The (phase, root) pairs are turned back into a dictionary with
209 the phase as index and the aggregated roots of that phase as value."""
209 the phase as index and the aggregated roots of that phase as value."""
210 headsbyphase = {i: [] for i in allphases}
210 headsbyphase = {i: [] for i in allphases}
211 entrysize = _fphasesentry.size
211 entrysize = _fphasesentry.size
212 while True:
212 while True:
213 entry = stream.read(entrysize)
213 entry = stream.read(entrysize)
214 if len(entry) < entrysize:
214 if len(entry) < entrysize:
215 if entry:
215 if entry:
216 raise error.Abort(_(b'bad phase-heads stream'))
216 raise error.Abort(_(b'bad phase-heads stream'))
217 break
217 break
218 phase, node = _fphasesentry.unpack(entry)
218 phase, node = _fphasesentry.unpack(entry)
219 headsbyphase[phase].append(node)
219 headsbyphase[phase].append(node)
220 return headsbyphase
220 return headsbyphase
221
221
222
222
223 def _sortedrange_insert(data, idx, rev, t):
223 def _sortedrange_insert(data, idx, rev, t):
224 merge_before = False
224 merge_before = False
225 if idx:
225 if idx:
226 r1, t1 = data[idx - 1]
226 r1, t1 = data[idx - 1]
227 merge_before = r1[-1] + 1 == rev and t1 == t
227 merge_before = r1[-1] + 1 == rev and t1 == t
228 merge_after = False
228 merge_after = False
229 if idx < len(data):
229 if idx < len(data):
230 r2, t2 = data[idx]
230 r2, t2 = data[idx]
231 merge_after = r2[0] == rev + 1 and t2 == t
231 merge_after = r2[0] == rev + 1 and t2 == t
232
232
233 if merge_before and merge_after:
233 if merge_before and merge_after:
234 data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t)
234 data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t)
235 data.pop(idx)
235 data.pop(idx)
236 elif merge_before:
236 elif merge_before:
237 data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t)
237 data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t)
238 elif merge_after:
238 elif merge_after:
239 data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t)
239 data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t)
240 else:
240 else:
241 data.insert(idx, (pycompat.xrange(rev, rev + 1), t))
241 data.insert(idx, (pycompat.xrange(rev, rev + 1), t))
242
242
243
243
244 def _sortedrange_split(data, idx, rev, t):
244 def _sortedrange_split(data, idx, rev, t):
245 r1, t1 = data[idx]
245 r1, t1 = data[idx]
246 if t == t1:
246 if t == t1:
247 return
247 return
248 t = (t1[0], t[1])
248 t = (t1[0], t[1])
249 if len(r1) == 1:
249 if len(r1) == 1:
250 data.pop(idx)
250 data.pop(idx)
251 _sortedrange_insert(data, idx, rev, t)
251 _sortedrange_insert(data, idx, rev, t)
252 elif r1[0] == rev:
252 elif r1[0] == rev:
253 data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1)
253 data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1)
254 _sortedrange_insert(data, idx, rev, t)
254 _sortedrange_insert(data, idx, rev, t)
255 elif r1[-1] == rev:
255 elif r1[-1] == rev:
256 data[idx] = (pycompat.xrange(r1[0], rev), t1)
256 data[idx] = (pycompat.xrange(r1[0], rev), t1)
257 _sortedrange_insert(data, idx + 1, rev, t)
257 _sortedrange_insert(data, idx + 1, rev, t)
258 else:
258 else:
259 data[idx : idx + 1] = [
259 data[idx : idx + 1] = [
260 (pycompat.xrange(r1[0], rev), t1),
260 (pycompat.xrange(r1[0], rev), t1),
261 (pycompat.xrange(rev, rev + 1), t),
261 (pycompat.xrange(rev, rev + 1), t),
262 (pycompat.xrange(rev + 1, r1[-1] + 1), t1),
262 (pycompat.xrange(rev + 1, r1[-1] + 1), t1),
263 ]
263 ]
264
264
265
265
266 def _trackphasechange(data, rev, old, new):
266 def _trackphasechange(data, rev, old, new):
267 """add a phase move to the <data> list of ranges
267 """add a phase move to the <data> list of ranges
268
268
269 If data is None, nothing happens.
269 If data is None, nothing happens.
270 """
270 """
271 if data is None:
271 if data is None:
272 return
272 return
273
273
274 # If data is empty, create a one-revision range and done
274 # If data is empty, create a one-revision range and done
275 if not data:
275 if not data:
276 data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new)))
276 data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new)))
277 return
277 return
278
278
279 low = 0
279 low = 0
280 high = len(data)
280 high = len(data)
281 t = (old, new)
281 t = (old, new)
282 while low < high:
282 while low < high:
283 mid = (low + high) // 2
283 mid = (low + high) // 2
284 revs = data[mid][0]
284 revs = data[mid][0]
285 revs_low = revs[0]
285 revs_low = revs[0]
286 revs_high = revs[-1]
286 revs_high = revs[-1]
287
287
288 if rev >= revs_low and rev <= revs_high:
288 if rev >= revs_low and rev <= revs_high:
289 _sortedrange_split(data, mid, rev, t)
289 _sortedrange_split(data, mid, rev, t)
290 return
290 return
291
291
292 if revs_low == rev + 1:
292 if revs_low == rev + 1:
293 if mid and data[mid - 1][0][-1] == rev:
293 if mid and data[mid - 1][0][-1] == rev:
294 _sortedrange_split(data, mid - 1, rev, t)
294 _sortedrange_split(data, mid - 1, rev, t)
295 else:
295 else:
296 _sortedrange_insert(data, mid, rev, t)
296 _sortedrange_insert(data, mid, rev, t)
297 return
297 return
298
298
299 if revs_high == rev - 1:
299 if revs_high == rev - 1:
300 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
300 if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
301 _sortedrange_split(data, mid + 1, rev, t)
301 _sortedrange_split(data, mid + 1, rev, t)
302 else:
302 else:
303 _sortedrange_insert(data, mid + 1, rev, t)
303 _sortedrange_insert(data, mid + 1, rev, t)
304 return
304 return
305
305
306 if revs_low > rev:
306 if revs_low > rev:
307 high = mid
307 high = mid
308 else:
308 else:
309 low = mid + 1
309 low = mid + 1
310
310
311 if low == len(data):
311 if low == len(data):
312 data.append((pycompat.xrange(rev, rev + 1), t))
312 data.append((pycompat.xrange(rev, rev + 1), t))
313 return
313 return
314
314
315 r1, t1 = data[low]
315 r1, t1 = data[low]
316 if r1[0] > rev:
316 if r1[0] > rev:
317 data.insert(low, (pycompat.xrange(rev, rev + 1), t))
317 data.insert(low, (pycompat.xrange(rev, rev + 1), t))
318 else:
318 else:
319 data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t))
319 data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t))
320
320
321
321
322 class phasecache(object):
322 class phasecache(object):
323 def __init__(self, repo, phasedefaults, _load=True):
323 def __init__(self, repo, phasedefaults, _load=True):
324 if _load:
324 if _load:
325 # Cheap trick to allow shallow-copy without copy module
325 # Cheap trick to allow shallow-copy without copy module
326 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
326 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
327 self._loadedrevslen = 0
327 self._loadedrevslen = 0
328 self._phasesets = None
328 self._phasesets = None
329 self.filterunknown(repo)
329 self.filterunknown(repo)
330 self.opener = repo.svfs
330 self.opener = repo.svfs
331
331
332 def hasnonpublicphases(self, repo):
332 def hasnonpublicphases(self, repo):
333 """detect if there are revisions with non-public phase"""
333 """detect if there are revisions with non-public phase"""
334 repo = repo.unfiltered()
334 repo = repo.unfiltered()
335 cl = repo.changelog
335 cl = repo.changelog
336 if len(cl) >= self._loadedrevslen:
336 if len(cl) >= self._loadedrevslen:
337 self.invalidate()
337 self.invalidate()
338 self.loadphaserevs(repo)
338 self.loadphaserevs(repo)
339 return any(
339 return any(
340 revs
340 revs
341 for phase, revs in pycompat.iteritems(self.phaseroots)
341 for phase, revs in pycompat.iteritems(self.phaseroots)
342 if phase != public
342 if phase != public
343 )
343 )
344
344
345 def nonpublicphaseroots(self, repo):
345 def nonpublicphaseroots(self, repo):
346 """returns the roots of all non-public phases
346 """returns the roots of all non-public phases
347
347
348 The roots are not minimized, so if the secret revisions are
348 The roots are not minimized, so if the secret revisions are
349 descendants of draft revisions, their roots will still be present.
349 descendants of draft revisions, their roots will still be present.
350 """
350 """
351 repo = repo.unfiltered()
351 repo = repo.unfiltered()
352 cl = repo.changelog
352 cl = repo.changelog
353 if len(cl) >= self._loadedrevslen:
353 if len(cl) >= self._loadedrevslen:
354 self.invalidate()
354 self.invalidate()
355 self.loadphaserevs(repo)
355 self.loadphaserevs(repo)
356 return set().union(
356 return set().union(
357 *[
357 *[
358 revs
358 revs
359 for phase, revs in pycompat.iteritems(self.phaseroots)
359 for phase, revs in pycompat.iteritems(self.phaseroots)
360 if phase != public
360 if phase != public
361 ]
361 ]
362 )
362 )
363
363
364 def getrevset(self, repo, phases, subset=None):
364 def getrevset(self, repo, phases, subset=None):
365 """return a smartset for the given phases"""
365 """return a smartset for the given phases"""
366 self.loadphaserevs(repo) # ensure phase's sets are loaded
366 self.loadphaserevs(repo) # ensure phase's sets are loaded
367 phases = set(phases)
367 phases = set(phases)
368 publicphase = public in phases
368 publicphase = public in phases
369
369
370 if publicphase:
370 if publicphase:
371 # In this case, phases keeps all the *other* phases.
371 # In this case, phases keeps all the *other* phases.
372 phases = set(allphases).difference(phases)
372 phases = set(allphases).difference(phases)
373 if not phases:
373 if not phases:
374 return smartset.fullreposet(repo)
374 return smartset.fullreposet(repo)
375
375
376 # fast path: _phasesets contains the interesting sets,
376 # fast path: _phasesets contains the interesting sets,
377 # might only need a union and post-filtering.
377 # might only need a union and post-filtering.
378 revsneedscopy = False
378 revsneedscopy = False
379 if len(phases) == 1:
379 if len(phases) == 1:
380 [p] = phases
380 [p] = phases
381 revs = self._phasesets[p]
381 revs = self._phasesets[p]
382 revsneedscopy = True # Don't modify _phasesets
382 revsneedscopy = True # Don't modify _phasesets
383 else:
383 else:
384 # revs has the revisions in all *other* phases.
384 # revs has the revisions in all *other* phases.
385 revs = set.union(*[self._phasesets[p] for p in phases])
385 revs = set.union(*[self._phasesets[p] for p in phases])
386
386
387 def _addwdir(wdirsubset, wdirrevs):
387 def _addwdir(wdirsubset, wdirrevs):
388 if wdirrev in wdirsubset and repo[None].phase() in phases:
388 if wdirrev in wdirsubset and repo[None].phase() in phases:
389 if revsneedscopy:
389 if revsneedscopy:
390 wdirrevs = wdirrevs.copy()
390 wdirrevs = wdirrevs.copy()
391 # The working dir would never be in the # cache, but it was in
391 # The working dir would never be in the # cache, but it was in
392 # the subset being filtered for its phase (or filtered out,
392 # the subset being filtered for its phase (or filtered out,
393 # depending on publicphase), so add it to the output to be
393 # depending on publicphase), so add it to the output to be
394 # included (or filtered out).
394 # included (or filtered out).
395 wdirrevs.add(wdirrev)
395 wdirrevs.add(wdirrev)
396 return wdirrevs
396 return wdirrevs
397
397
398 if not publicphase:
398 if not publicphase:
399 if repo.changelog.filteredrevs:
399 if repo.changelog.filteredrevs:
400 revs = revs - repo.changelog.filteredrevs
400 revs = revs - repo.changelog.filteredrevs
401
401
402 if subset is None:
402 if subset is None:
403 return smartset.baseset(revs)
403 return smartset.baseset(revs)
404 else:
404 else:
405 revs = _addwdir(subset, revs)
405 revs = _addwdir(subset, revs)
406 return subset & smartset.baseset(revs)
406 return subset & smartset.baseset(revs)
407 else:
407 else:
408 if subset is None:
408 if subset is None:
409 subset = smartset.fullreposet(repo)
409 subset = smartset.fullreposet(repo)
410
410
411 revs = _addwdir(subset, revs)
411 revs = _addwdir(subset, revs)
412
412
413 if not revs:
413 if not revs:
414 return subset
414 return subset
415 return subset.filter(lambda r: r not in revs)
415 return subset.filter(lambda r: r not in revs)
416
416
417 def copy(self):
417 def copy(self):
418 # Shallow copy meant to ensure isolation in
418 # Shallow copy meant to ensure isolation in
419 # advance/retractboundary(), nothing more.
419 # advance/retractboundary(), nothing more.
420 ph = self.__class__(None, None, _load=False)
420 ph = self.__class__(None, None, _load=False)
421 ph.phaseroots = self.phaseroots.copy()
421 ph.phaseroots = self.phaseroots.copy()
422 ph.dirty = self.dirty
422 ph.dirty = self.dirty
423 ph.opener = self.opener
423 ph.opener = self.opener
424 ph._loadedrevslen = self._loadedrevslen
424 ph._loadedrevslen = self._loadedrevslen
425 ph._phasesets = self._phasesets
425 ph._phasesets = self._phasesets
426 return ph
426 return ph
427
427
428 def replace(self, phcache):
428 def replace(self, phcache):
429 """replace all values in 'self' with content of phcache"""
429 """replace all values in 'self' with content of phcache"""
430 for a in (
430 for a in (
431 b'phaseroots',
431 b'phaseroots',
432 b'dirty',
432 b'dirty',
433 b'opener',
433 b'opener',
434 b'_loadedrevslen',
434 b'_loadedrevslen',
435 b'_phasesets',
435 b'_phasesets',
436 ):
436 ):
437 setattr(self, a, getattr(phcache, a))
437 setattr(self, a, getattr(phcache, a))
438
438
439 def _getphaserevsnative(self, repo):
439 def _getphaserevsnative(self, repo):
440 repo = repo.unfiltered()
440 repo = repo.unfiltered()
441 return repo.changelog.computephases(self.phaseroots)
441 return repo.changelog.computephases(self.phaseroots)
442
442
443 def _computephaserevspure(self, repo):
443 def _computephaserevspure(self, repo):
444 repo = repo.unfiltered()
444 repo = repo.unfiltered()
445 cl = repo.changelog
445 cl = repo.changelog
446 self._phasesets = {phase: set() for phase in allphases}
446 self._phasesets = {phase: set() for phase in allphases}
447 lowerroots = set()
447 lowerroots = set()
448 for phase in reversed(trackedphases):
448 for phase in reversed(trackedphases):
449 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
449 roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
450 if roots:
450 if roots:
451 ps = set(cl.descendants(roots))
451 ps = set(cl.descendants(roots))
452 for root in roots:
452 for root in roots:
453 ps.add(root)
453 ps.add(root)
454 ps.difference_update(lowerroots)
454 ps.difference_update(lowerroots)
455 lowerroots.update(ps)
455 lowerroots.update(ps)
456 self._phasesets[phase] = ps
456 self._phasesets[phase] = ps
457 self._loadedrevslen = len(cl)
457 self._loadedrevslen = len(cl)
458
458
459 def loadphaserevs(self, repo):
459 def loadphaserevs(self, repo):
460 """ensure phase information is loaded in the object"""
460 """ensure phase information is loaded in the object"""
461 if self._phasesets is None:
461 if self._phasesets is None:
462 try:
462 try:
463 res = self._getphaserevsnative(repo)
463 res = self._getphaserevsnative(repo)
464 self._loadedrevslen, self._phasesets = res
464 self._loadedrevslen, self._phasesets = res
465 except AttributeError:
465 except AttributeError:
466 self._computephaserevspure(repo)
466 self._computephaserevspure(repo)
467
467
468 def invalidate(self):
468 def invalidate(self):
469 self._loadedrevslen = 0
469 self._loadedrevslen = 0
470 self._phasesets = None
470 self._phasesets = None
471
471
472 def phase(self, repo, rev):
472 def phase(self, repo, rev):
473 # We need a repo argument here to be able to build _phasesets
473 # We need a repo argument here to be able to build _phasesets
474 # if necessary. The repository instance is not stored in
474 # if necessary. The repository instance is not stored in
475 # phasecache to avoid reference cycles. The changelog instance
475 # phasecache to avoid reference cycles. The changelog instance
476 # is not stored because it is a filecache() property and can
476 # is not stored because it is a filecache() property and can
477 # be replaced without us being notified.
477 # be replaced without us being notified.
478 if rev == nullrev:
478 if rev == nullrev:
479 return public
479 return public
480 if rev < nullrev:
480 if rev < nullrev:
481 raise ValueError(_(b'cannot lookup negative revision'))
481 raise ValueError(_(b'cannot lookup negative revision'))
482 if rev >= self._loadedrevslen:
482 if rev >= self._loadedrevslen:
483 self.invalidate()
483 self.invalidate()
484 self.loadphaserevs(repo)
484 self.loadphaserevs(repo)
485 for phase in trackedphases:
485 for phase in trackedphases:
486 if rev in self._phasesets[phase]:
486 if rev in self._phasesets[phase]:
487 return phase
487 return phase
488 return public
488 return public
489
489
490 def write(self):
490 def write(self):
491 if not self.dirty:
491 if not self.dirty:
492 return
492 return
493 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
493 f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
494 try:
494 try:
495 self._write(f)
495 self._write(f)
496 finally:
496 finally:
497 f.close()
497 f.close()
498
498
499 def _write(self, fp):
499 def _write(self, fp):
500 for phase, roots in pycompat.iteritems(self.phaseroots):
500 for phase, roots in pycompat.iteritems(self.phaseroots):
501 for h in sorted(roots):
501 for h in sorted(roots):
502 fp.write(b'%i %s\n' % (phase, hex(h)))
502 fp.write(b'%i %s\n' % (phase, hex(h)))
503 self.dirty = False
503 self.dirty = False
504
504
505 def _updateroots(self, phase, newroots, tr):
505 def _updateroots(self, phase, newroots, tr):
506 self.phaseroots[phase] = newroots
506 self.phaseroots[phase] = newroots
507 self.invalidate()
507 self.invalidate()
508 self.dirty = True
508 self.dirty = True
509
509
510 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
510 tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
511 tr.hookargs[b'phases_moved'] = b'1'
511 tr.hookargs[b'phases_moved'] = b'1'
512
512
513 def registernew(self, repo, tr, targetphase, nodes):
513 def registernew(self, repo, tr, targetphase, nodes, revs=None):
514 if revs is None:
515 revs = []
514 repo = repo.unfiltered()
516 repo = repo.unfiltered()
515 self._retractboundary(repo, tr, targetphase, nodes)
517 self._retractboundary(repo, tr, targetphase, nodes, revs=revs)
516 if tr is not None and b'phases' in tr.changes:
518 if tr is not None and b'phases' in tr.changes:
517 phasetracking = tr.changes[b'phases']
519 phasetracking = tr.changes[b'phases']
518 torev = repo.changelog.rev
520 torev = repo.changelog.rev
519 phase = self.phase
521 phase = self.phase
520 revs = [torev(node) for node in nodes]
522 revs = [torev(node) for node in nodes] + sorted(revs)
521 revs.sort()
523 revs.sort()
522 for rev in revs:
524 for rev in revs:
523 revphase = phase(repo, rev)
525 revphase = phase(repo, rev)
524 _trackphasechange(phasetracking, rev, None, revphase)
526 _trackphasechange(phasetracking, rev, None, revphase)
525 repo.invalidatevolatilesets()
527 repo.invalidatevolatilesets()
526
528
527 def advanceboundary(self, repo, tr, targetphase, nodes, dryrun=None):
529 def advanceboundary(
530 self, repo, tr, targetphase, nodes, revs=None, dryrun=None
531 ):
528 """Set all 'nodes' to phase 'targetphase'
532 """Set all 'nodes' to phase 'targetphase'
529
533
530 Nodes with a phase lower than 'targetphase' are not affected.
534 Nodes with a phase lower than 'targetphase' are not affected.
531
535
532 If dryrun is True, no actions will be performed
536 If dryrun is True, no actions will be performed
533
537
534 Returns a set of revs whose phase is changed or should be changed
538 Returns a set of revs whose phase is changed or should be changed
535 """
539 """
536 # Be careful to preserve shallow-copied values: do not update
540 # Be careful to preserve shallow-copied values: do not update
537 # phaseroots values, replace them.
541 # phaseroots values, replace them.
542 if revs is None:
543 revs = []
538 if tr is None:
544 if tr is None:
539 phasetracking = None
545 phasetracking = None
540 else:
546 else:
541 phasetracking = tr.changes.get(b'phases')
547 phasetracking = tr.changes.get(b'phases')
542
548
543 repo = repo.unfiltered()
549 repo = repo.unfiltered()
550 revs = [repo[n].rev() for n in nodes] + [r for r in revs]
544
551
545 changes = set() # set of revisions to be changed
552 changes = set() # set of revisions to be changed
546 delroots = [] # set of root deleted by this path
553 delroots = [] # set of root deleted by this path
547 for phase in (phase for phase in allphases if phase > targetphase):
554 for phase in (phase for phase in allphases if phase > targetphase):
548 # filter nodes that are not in a compatible phase already
555 # filter nodes that are not in a compatible phase already
549 nodes = [
556 revs = [rev for rev in revs if self.phase(repo, rev) >= phase]
550 n for n in nodes if self.phase(repo, repo[n].rev()) >= phase
557 if not revs:
551 ]
552 if not nodes:
553 break # no roots to move anymore
558 break # no roots to move anymore
554
559
555 olds = self.phaseroots[phase]
560 olds = self.phaseroots[phase]
556
561
557 affected = repo.revs(b'%ln::%ln', olds, nodes)
562 affected = repo.revs(b'%ln::%ld', olds, revs)
558 changes.update(affected)
563 changes.update(affected)
559 if dryrun:
564 if dryrun:
560 continue
565 continue
561 for r in affected:
566 for r in affected:
562 _trackphasechange(
567 _trackphasechange(
563 phasetracking, r, self.phase(repo, r), targetphase
568 phasetracking, r, self.phase(repo, r), targetphase
564 )
569 )
565
570
566 roots = {
571 roots = {
567 ctx.node()
572 ctx.node()
568 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
573 for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
569 }
574 }
570 if olds != roots:
575 if olds != roots:
571 self._updateroots(phase, roots, tr)
576 self._updateroots(phase, roots, tr)
572 # some roots may need to be declared for lower phases
577 # some roots may need to be declared for lower phases
573 delroots.extend(olds - roots)
578 delroots.extend(olds - roots)
574 if not dryrun:
579 if not dryrun:
575 # declare deleted root in the target phase
580 # declare deleted root in the target phase
576 if targetphase != 0:
581 if targetphase != 0:
577 self._retractboundary(repo, tr, targetphase, delroots)
582 self._retractboundary(repo, tr, targetphase, delroots)
578 repo.invalidatevolatilesets()
583 repo.invalidatevolatilesets()
579 return changes
584 return changes
580
585
581 def retractboundary(self, repo, tr, targetphase, nodes):
586 def retractboundary(self, repo, tr, targetphase, nodes):
582 oldroots = {
587 oldroots = {
583 phase: revs
588 phase: revs
584 for phase, revs in pycompat.iteritems(self.phaseroots)
589 for phase, revs in pycompat.iteritems(self.phaseroots)
585 if phase <= targetphase
590 if phase <= targetphase
586 }
591 }
587 if tr is None:
592 if tr is None:
588 phasetracking = None
593 phasetracking = None
589 else:
594 else:
590 phasetracking = tr.changes.get(b'phases')
595 phasetracking = tr.changes.get(b'phases')
591 repo = repo.unfiltered()
596 repo = repo.unfiltered()
592 if (
597 if (
593 self._retractboundary(repo, tr, targetphase, nodes)
598 self._retractboundary(repo, tr, targetphase, nodes)
594 and phasetracking is not None
599 and phasetracking is not None
595 ):
600 ):
596
601
597 # find the affected revisions
602 # find the affected revisions
598 new = self.phaseroots[targetphase]
603 new = self.phaseroots[targetphase]
599 old = oldroots[targetphase]
604 old = oldroots[targetphase]
600 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
605 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))
601
606
602 # find the phase of the affected revision
607 # find the phase of the affected revision
603 for phase in pycompat.xrange(targetphase, -1, -1):
608 for phase in pycompat.xrange(targetphase, -1, -1):
604 if phase:
609 if phase:
605 roots = oldroots.get(phase, [])
610 roots = oldroots.get(phase, [])
606 revs = set(repo.revs(b'%ln::%ld', roots, affected))
611 revs = set(repo.revs(b'%ln::%ld', roots, affected))
607 affected -= revs
612 affected -= revs
608 else: # public phase
613 else: # public phase
609 revs = affected
614 revs = affected
610 for r in sorted(revs):
615 for r in sorted(revs):
611 _trackphasechange(phasetracking, r, phase, targetphase)
616 _trackphasechange(phasetracking, r, phase, targetphase)
612 repo.invalidatevolatilesets()
617 repo.invalidatevolatilesets()
613
618
614 def _retractboundary(self, repo, tr, targetphase, nodes):
619 def _retractboundary(self, repo, tr, targetphase, nodes, revs=None):
615 # Be careful to preserve shallow-copied values: do not update
620 # Be careful to preserve shallow-copied values: do not update
616 # phaseroots values, replace them.
621 # phaseroots values, replace them.
622 if revs is None:
623 revs = []
617 if targetphase in (archived, internal) and not supportinternal(repo):
624 if targetphase in (archived, internal) and not supportinternal(repo):
618 name = phasenames[targetphase]
625 name = phasenames[targetphase]
619 msg = b'this repository does not support the %s phase' % name
626 msg = b'this repository does not support the %s phase' % name
620 raise error.ProgrammingError(msg)
627 raise error.ProgrammingError(msg)
621
628
622 repo = repo.unfiltered()
629 repo = repo.unfiltered()
623 torev = repo.changelog.rev
630 torev = repo.changelog.rev
624 tonode = repo.changelog.node
631 tonode = repo.changelog.node
625 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
632 currentroots = {torev(node) for node in self.phaseroots[targetphase]}
626 finalroots = oldroots = set(currentroots)
633 finalroots = oldroots = set(currentroots)
627 newroots = [torev(node) for node in nodes]
634 newroots = [torev(node) for node in nodes] + [r for r in revs]
628 newroots = [
635 newroots = [
629 rev for rev in newroots if self.phase(repo, rev) < targetphase
636 rev for rev in newroots if self.phase(repo, rev) < targetphase
630 ]
637 ]
631
638
632 if newroots:
639 if newroots:
633 if nullrev in newroots:
640 if nullrev in newroots:
634 raise error.Abort(_(b'cannot change null revision phase'))
641 raise error.Abort(_(b'cannot change null revision phase'))
635 currentroots.update(newroots)
642 currentroots.update(newroots)
636
643
637 # Only compute new roots for revs above the roots that are being
644 # Only compute new roots for revs above the roots that are being
638 # retracted.
645 # retracted.
639 minnewroot = min(newroots)
646 minnewroot = min(newroots)
640 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
647 aboveroots = [rev for rev in currentroots if rev >= minnewroot]
641 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
648 updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
642
649
643 finalroots = {rev for rev in currentroots if rev < minnewroot}
650 finalroots = {rev for rev in currentroots if rev < minnewroot}
644 finalroots.update(updatedroots)
651 finalroots.update(updatedroots)
645 if finalroots != oldroots:
652 if finalroots != oldroots:
646 self._updateroots(
653 self._updateroots(
647 targetphase, {tonode(rev) for rev in finalroots}, tr
654 targetphase, {tonode(rev) for rev in finalroots}, tr
648 )
655 )
649 return True
656 return True
650 return False
657 return False
651
658
652 def filterunknown(self, repo):
659 def filterunknown(self, repo):
653 """remove unknown nodes from the phase boundary
660 """remove unknown nodes from the phase boundary
654
661
655 Nothing is lost as unknown nodes only hold data for their descendants.
662 Nothing is lost as unknown nodes only hold data for their descendants.
656 """
663 """
657 filtered = False
664 filtered = False
658 has_node = repo.changelog.index.has_node # to filter unknown nodes
665 has_node = repo.changelog.index.has_node # to filter unknown nodes
659 for phase, nodes in pycompat.iteritems(self.phaseroots):
666 for phase, nodes in pycompat.iteritems(self.phaseroots):
660 missing = sorted(node for node in nodes if not has_node(node))
667 missing = sorted(node for node in nodes if not has_node(node))
661 if missing:
668 if missing:
662 for mnode in missing:
669 for mnode in missing:
663 repo.ui.debug(
670 repo.ui.debug(
664 b'removing unknown node %s from %i-phase boundary\n'
671 b'removing unknown node %s from %i-phase boundary\n'
665 % (short(mnode), phase)
672 % (short(mnode), phase)
666 )
673 )
667 nodes.symmetric_difference_update(missing)
674 nodes.symmetric_difference_update(missing)
668 filtered = True
675 filtered = True
669 if filtered:
676 if filtered:
670 self.dirty = True
677 self.dirty = True
671 # filterunknown is called by repo.destroyed, we may have no changes in
678 # filterunknown is called by repo.destroyed, we may have no changes in
672 # root but _phasesets contents is certainly invalid (or at least we
679 # root but _phasesets contents is certainly invalid (or at least we
673 # have not proper way to check that). related to issue 3858.
680 # have not proper way to check that). related to issue 3858.
674 #
681 #
675 # The other caller is __init__ that have no _phasesets initialized
682 # The other caller is __init__ that have no _phasesets initialized
676 # anyway. If this change we should consider adding a dedicated
683 # anyway. If this change we should consider adding a dedicated
677 # "destroyed" function to phasecache or a proper cache key mechanism
684 # "destroyed" function to phasecache or a proper cache key mechanism
678 # (see branchmap one)
685 # (see branchmap one)
679 self.invalidate()
686 self.invalidate()
680
687
681
688
def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None):
    """Add nodes to a phase changing other nodes phases if necessary.

    This function move boundary *forward* this means that all nodes
    are set in the target phase or kept in a *lower* phase.

    Simplify boundary to contains phase roots only.

    ``revs`` optionally supplies additional revision numbers to move.

    If dryrun is True, no actions will be performed

    Returns a set of revs whose phase is changed or should be changed
    """
    if revs is None:
        revs = []
    # operate on a copy so a failure leaves the live cache untouched
    phcache = repo._phasecache.copy()
    changes = phcache.advanceboundary(
        repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun
    )
    if not dryrun:
        repo._phasecache.replace(phcache)
    return changes
701
710
702
711
def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase changing other nodes phases if
    necessary.

    This function move boundary *backward* this means that all nodes
    are set in the target phase or kept in a *higher* phase.

    Simplify boundary to contains phase roots only."""
    # operate on a copy so a failure leaves the live cache untouched
    phcache = repo._phasecache.copy()
    phcache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)
714
723
715
724
def registernew(repo, tr, targetphase, nodes, revs=None):
    """register a new revision and its phase

    Code adding revisions to the repository should use this function to
    set new changeset in their target phase (or higher).

    ``revs`` optionally supplies additional revision numbers alongside
    ``nodes``.
    """
    if revs is None:
        revs = []
    # operate on a copy so a failure leaves the live cache untouched
    phcache = repo._phasecache.copy()
    phcache.registernew(repo, tr, targetphase, nodes, revs=revs)
    repo._phasecache.replace(phcache)
725
736
726
737
def listphases(repo):
    """List phases root for serialization over pushkey"""
    # Use ordered dictionary so behavior is deterministic.
    keys = util.sortdict()
    value = b'%i' % draft
    cl = repo.unfiltered().changelog
    for root in repo._phasecache.phaseroots[draft]:
        if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
            keys[hex(root)] = value

    if repo.publishing():
        # Add an extra data to let remote know we are a publishing
        # repo. Publishing repo can't just pretend they are old repo.
        # When pushing to a publishing repo, the client still need to
        # push phase boundary
        #
        # Push do not only push changeset. It also push phase data.
        # New phase data may apply to common changeset which won't be
        # push (as they are common). Here is a very simple example:
        #
        # 1) repo A push changeset X as draft to repo B
        # 2) repo B make changeset X public
        # 3) repo B push to repo A. X is not pushed but the data that
        #    X as now public should
        #
        # The server can't handle it on it's own as it has no idea of
        # client phase data.
        keys[b'publishing'] = b'True'
    return keys
756
767
757
768
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Apply a phase move received over pushkey.

    Returns True on success (or a benign race that ended in the desired
    phase), False otherwise.
    """
    repo = repo.unfiltered()
    with repo.lock():
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr))  # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr))  # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            with repo.transaction(b'pushkey-phase') as tr:
                advanceboundary(repo, tr, newphase, [bin(nhex)])
            return True
        elif currentphase == newphase:
            # raced, but got correct result
            return True
        else:
            return False
774
785
775
786
def subsetphaseheads(repo, subset):
    """Finds the phase heads for a subset of a history

    Returns a list indexed by phase number where each item is a list of phase
    head nodes.
    """
    cl = repo.changelog

    headsbyphase = {i: [] for i in allphases}
    # No need to keep track of secret phase; any heads in the subset that
    # are not mentioned are implicitly secret.
    for phase in allphases[:secret]:
        revset = b"heads(%%ln & %s())" % phasenames[phase]
        headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
    return headsbyphase
791
802
792
803
def updatephases(repo, trgetter, headsbyphase):
    """Updates the repo with the given phase heads"""
    # Now advance phase boundaries of all phases
    #
    # run the update (and fetch transaction) only if there are actually things
    # to update. This avoid creating empty transaction during no-op operation.

    for phase in allphases:
        revset = b'%ln - _phase(%s)'
        heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
        if heads:
            advanceboundary(repo, trgetter(), phase, heads)
805
816
806
817
def analyzeremotephases(repo, subset, roots):
    """Compute phases heads and root in a subset of node from root dict

    * subset is heads of the subset
    * roots is {<nodeid> => phase} mapping. key and value are string.

    Accept unknown element input
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    has_node = repo.changelog.index.has_node  # to filter unknown nodes
    for nhex, phase in pycompat.iteritems(roots):
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == public:
            if node != nullid:
                # only the null node may legitimately be a public root
                repo.ui.warn(
                    _(
                        b'ignoring inconsistent public root'
                        b' from remote: %s\n'
                    )
                    % nhex
                )
        elif phase == draft:
            if has_node(node):
                draftroots.append(node)
        else:
            repo.ui.warn(
                _(b'ignoring unexpected root from remote: %i %s\n')
                % (phase, nhex)
            )
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots
844
855
845
856
class remotephasessummary(object):
    """summarize phase information on the remote side

    :publishing: True is the remote is publishing
    :publicheads: list of remote public phase heads (nodes)
    :draftheads: list of remote draft phase heads (nodes)
    :draftroots: list of remote draft phase root (nodes)
    """

    def __init__(self, repo, remotesubset, remoteroots):
        unfi = repo.unfiltered()
        self._allremoteroots = remoteroots

        self.publishing = remoteroots.get(b'publishing', False)

        ana = analyzeremotephases(repo, remotesubset, remoteroots)
        self.publicheads, self.draftroots = ana
        # Get the list of all "heads" revs draft on remote
        dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
        self.draftheads = [c.node() for c in dheads]
877
867
878
def newheads(repo, heads, roots):
    """compute new head of a subset minus another

    * `heads`: define the first subset
    * `roots`: define the second we subtract from the first"""
    # prevent an import cycle
    # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
    from . import dagop

    repo = repo.unfiltered()
    cl = repo.changelog
    rev = cl.index.get_rev
    if not roots:
        return heads
    if not heads or heads == [nullid]:
        return []
    # The logic operated on revisions, convert arguments early for convenience
    new_heads = {rev(n) for n in heads if n != nullid}
    roots = [rev(n) for n in roots]
    # compute the area we need to remove
    affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
    # heads in the area are no longer heads
    new_heads.difference_update(affected_zone)
    # revisions in the area have children outside of it,
    # They might be new heads
    candidates = repo.revs(
        b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
    )
    candidates -= affected_zone
    if new_heads or candidates:
        # remove candidate that are ancestors of other heads
        new_heads.update(candidates)
        prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
        pruned = dagop.reachableroots(repo, candidates, prunestart)
        new_heads.difference_update(pruned)

    return pycompat.maplist(cl.node, sorted(new_heads))
905
916
906
917
def newcommitphase(ui):
    """helper to get the target phase of new commit

    Handle all possible values for the phases.new-commit options.

    """
    v = ui.config(b'phases', b'new-commit')
    try:
        return phasenumber2[v]
    except KeyError:
        raise error.ConfigError(
            _(b"phases.new-commit: not a valid phase name ('%s')") % v
        )
920
931
921
932
def hassecret(repo):
    """utility function that check if a repo have any secret changeset."""
    return bool(repo._phasecache.phaseroots[secret])
925
936
926
937
def preparehookargs(node, old, new):
    """Build the hook-argument dict for a phase move of ``node``.

    ``old`` may be None (freshly added changeset), in which case the
    old-phase argument is the empty string.
    """
    if old is None:
        old = b''
    else:
        old = phasenames[old]
    return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
General Comments 0
You need to be logged in to leave comments. Login now