phases: convert registernew users to use revision sets...
Joerg Sonnenberger
r46375:5d65e04b default
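In the convert extension's sink (the first file below), the functional change is a single call site: phases.registernew() now receives revision numbers instead of binary nodes. A minimal before/after sketch of just that call, reusing the names from the diff (an illustrative excerpt, not a standalone program):

    # before: registernew was handed a list of binary nodes
    phases.registernew(self.repo, tr, phases.draft, [ctx.node()])
    # after: it is handed revision numbers, matching the revision-set interface
    phases.registernew(self.repo, tr, phases.draft, [ctx.rev()])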
@@ -1,730 +1,730 @@
# hg.py - hg backend for convert extension
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# Notes for hg->hg conversion:
#
# * Old versions of Mercurial didn't trim the whitespace from the ends
#   of commit messages, but new versions do. Changesets created by
#   those older versions, then converted, may thus have different
#   hashes for changesets that are otherwise identical.
#
# * Using "--config convert.hg.saverev=true" will store the source
#   identifier in the converted revision. This will cause the
#   converted revision to have a different identity than the source.
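The saverev behaviour described above is implemented further down in this file: when the flag is set, putcommit() records the source identifier in the new changeset's extra dict. A short excerpt-style sketch mirroring that code (commit.rev is the source identifier, commit.saverev the configured flag):

    # mirrors the logic in putcommit() below
    if commit.rev and commit.saverev:
        extra[b'convert_revision'] = commit.rev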
from __future__ import absolute_import

import os
import re
import time

from mercurial.i18n import _
from mercurial.pycompat import open
from mercurial import (
    bookmarks,
    context,
    error,
    exchange,
    hg,
    lock as lockmod,
    merge as mergemod,
    node as nodemod,
    phases,
    pycompat,
    scmutil,
    util,
)
from mercurial.utils import dateutil

stringio = util.stringio

from . import common

mapfile = common.mapfile
NoRepo = common.NoRepo

sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')


class mercurial_sink(common.converter_sink):
    def __init__(self, ui, repotype, path):
        common.converter_sink.__init__(self, ui, repotype, path)
        self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames')
        self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches')
        self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch')
        self.lastbranch = None
        if os.path.isdir(path) and len(os.listdir(path)) > 0:
            try:
                self.repo = hg.repository(self.ui, path)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
            except error.RepoError as err:
                ui.traceback()
                raise NoRepo(err.args[0])
        else:
            try:
                ui.status(_(b'initializing destination %s repository\n') % path)
                self.repo = hg.repository(self.ui, path, create=True)
                if not self.repo.local():
                    raise NoRepo(
                        _(b'%s is not a local Mercurial repository') % path
                    )
                self.created.append(path)
            except error.RepoError:
                ui.traceback()
                raise NoRepo(
                    _(b"could not create hg repository %s as sink") % path
                )
        self.lock = None
        self.wlock = None
        self.filemapmode = False
        self.subrevmaps = {}

    def before(self):
        self.ui.debug(b'run hg sink pre-conversion action\n')
        self.wlock = self.repo.wlock()
        self.lock = self.repo.lock()

    def after(self):
        self.ui.debug(b'run hg sink post-conversion action\n')
        if self.lock:
            self.lock.release()
        if self.wlock:
            self.wlock.release()

    def revmapfile(self):
        return self.repo.vfs.join(b"shamap")

    def authorfile(self):
        return self.repo.vfs.join(b"authormap")

    def setbranch(self, branch, pbranches):
        if not self.clonebranches:
            return

        setbranch = branch != self.lastbranch
        self.lastbranch = branch
        if not branch:
            branch = b'default'
        pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches]

        branchpath = os.path.join(self.path, branch)
        if setbranch:
            self.after()
            try:
                self.repo = hg.repository(self.ui, branchpath)
            except Exception:
                self.repo = hg.repository(self.ui, branchpath, create=True)
            self.before()

        # pbranches may bring revisions from other branches (merge parents)
        # Make sure we have them, or pull them.
        missings = {}
        for b in pbranches:
            try:
                self.repo.lookup(b[0])
            except Exception:
                missings.setdefault(b[1], []).append(b[0])

        if missings:
            self.after()
            for pbranch, heads in sorted(pycompat.iteritems(missings)):
                pbranchpath = os.path.join(self.path, pbranch)
                prepo = hg.peer(self.ui, {}, pbranchpath)
                self.ui.note(
                    _(b'pulling from %s into %s\n') % (pbranch, branch)
                )
                exchange.pull(
                    self.repo, prepo, [prepo.lookup(h) for h in heads]
                )
            self.before()

    def _rewritetags(self, source, revmap, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                self.ui.warn(_(b'invalid tag entry: "%s"\n') % line)
                fp.write(b'%s\n' % line)  # Bogus, but keep for hash stability
                continue
            revid = revmap.get(source.lookuprev(s[0]))
            if not revid:
                if s[0] == nodemod.nullhex:
                    revid = s[0]
                else:
                    # missing, but keep for hash stability
                    self.ui.warn(_(b'missing tag entry: "%s"\n') % line)
                    fp.write(b'%s\n' % line)
                    continue
            fp.write(b'%s %s\n' % (revid, s[1]))
        return fp.getvalue()
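_rewritetags() above maps each '<node> <tag>' line of a source .hgtags file through revmap so the converted repository's tags point at converted nodes. A simplified sketch of the per-line happy path (rewrite_line is a hypothetical helper; the real method also resolves the node via source.lookuprev() and keeps malformed or unmapped lines verbatim for hash stability):

    def rewrite_line(line, revmap):
        # line is b'<40-hex source node> <tagname>'; revmap maps source
        # hex nodes to destination hex nodes
        node, tag = line.split(b' ', 1)
        return b'%s %s\n' % (revmap.get(node, node), tag)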

    def _rewritesubstate(self, source, data):
        fp = stringio()
        for line in data.splitlines():
            s = line.split(b' ', 1)
            if len(s) != 2:
                continue

            revid = s[0]
            subpath = s[1]
            if revid != nodemod.nullhex:
                revmap = self.subrevmaps.get(subpath)
                if revmap is None:
                    revmap = mapfile(
                        self.ui, self.repo.wjoin(subpath, b'.hg/shamap')
                    )
                    self.subrevmaps[subpath] = revmap

                    # It is reasonable that one or more of the subrepos don't
                    # need to be converted, in which case they can be cloned
                    # into place instead of converted. Therefore, only warn
                    # once.
                    msg = _(b'no ".hgsubstate" updates will be made for "%s"\n')
                    if len(revmap) == 0:
                        sub = self.repo.wvfs.reljoin(subpath, b'.hg')

                        if self.repo.wvfs.exists(sub):
                            self.ui.warn(msg % subpath)

                newid = revmap.get(revid)
                if not newid:
                    if len(revmap) > 0:
                        self.ui.warn(
                            _(b"%s is missing from %s/.hg/shamap\n")
                            % (revid, subpath)
                        )
                else:
                    revid = newid

            fp.write(b'%s %s\n' % (revid, subpath))

        return fp.getvalue()

    def _calculatemergedfiles(self, source, p1ctx, p2ctx):
        """Calculates the files from p2 that we need to pull in when merging p1
        and p2, given that the merge is coming from the given source.

        This prevents us from losing files that only exist in the target p2 and
        that don't come from the source repo (like if you're merging multiple
        repositories together).
        """
        anc = [p1ctx.ancestor(p2ctx)]
        # Calculate what files are coming from p2
        # TODO: mresult.commitinfo might be able to get that info
        mresult = mergemod.calculateupdates(
            self.repo,
            p1ctx,
            p2ctx,
            anc,
            branchmerge=True,
            force=True,
            acceptremote=False,
            followcopies=False,
        )

        for file, (action, info, msg) in mresult.filemap():
            if source.targetfilebelongstosource(file):
                # If the file belongs to the source repo, ignore the p2
                # since it will be covered by the existing fileset.
                continue

            # If the file requires actual merging, abort. We don't have enough
            # context to resolve merges correctly.
            if action in [b'm', b'dm', b'cd', b'dc']:
                raise error.Abort(
                    _(
                        b"unable to convert merge commit "
                        b"since target parents do not merge cleanly (file "
                        b"%s, parents %s and %s)"
                    )
                    % (file, p1ctx, p2ctx)
                )
            elif action == b'k':
                # 'keep' means nothing changed from p1
                continue
            else:
                # Any other change means we want to take the p2 version
                yield file

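The two-letter action codes tested above come from Mercurial's merge machinery; a hedged gloss of the policy _calculatemergedfiles() applies per file (the mnemonics are an informal reading, not definitions from this file):

    # informal gloss; the exact mnemonics live in mercurial.merge
    MERGE_ACTION_POLICY = {
        b'm': 'abort',   # a real merge would be needed
        b'dm': 'abort',  # rename plus merge
        b'cd': 'abort',  # change/delete conflict
        b'dc': 'abort',  # delete/change conflict
        b'k': 'skip',    # keep: unchanged from p1
    }

    def policy(action):
        # anything not listed means: take the p2 version of the file
        return MERGE_ACTION_POLICY.get(action, 'take-p2')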
    def putcommit(
        self, files, copies, parents, commit, source, revmap, full, cleanp2
    ):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug(b'reusing %s from p2\n' % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            if f == b'.hgtags':
                data = self._rewritetags(source, revmap, data)
            if f == b'.hgsubstate':
                data = self._rewritesubstate(source, data)
            return context.memfilectx(
                self.repo,
                memctx,
                f,
                data,
                b'l' in mode,
                b'x' in mode,
                copies.get(f),
            )

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(nodemod.nullid)
        if len(parents) < 2:
            parents.append(nodemod.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            oldrev = source.lookuprev(sha1)
            newrev = revmap.get(oldrev)
            if newrev is not None:
                text = text.replace(sha1, newrev[: len(sha1)])

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config(b'convert', b'hg.sourcename')
        if sourcename:
            extra[b'convert_source'] = sourcename

        for label in (
            b'source',
            b'transplant_source',
            b'rebase_source',
            b'intermediate-source',
        ):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == b'transplant_source':
                node = nodemod.hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == b'transplant_source':
                    newrev = nodemod.bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra[b'branch'] = commit.branch
        if commit.rev and commit.saverev:
            extra[b'convert_revision'] = commit.rev

        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = self.repo[p1]
            p2ctx = None
            if p2 != nodemod.nullid:
                p2ctx = self.repo[p2]
            fileset = set(files)
            if full:
                fileset.update(self.repo[p1])
                fileset.update(self.repo[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(
                self.repo,
                (p1, p2),
                text,
                fileset,
                getfilectx,
                commit.author,
                commit.date,
                extra,
            )

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            self.repo.ui.setconfig(
                b'phases',
                b'new-commit',
                phases.phasenames[commit.phase],
                b'convert',
            )

            with self.repo.transaction(b"convert") as tr:
                if self.repo.ui.config(b'convert', b'hg.preserve-hash'):
                    origctx = commit.ctx
                else:
                    origctx = None
                node = nodemod.hex(self.repo.commitctx(ctx, origctx=origctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = self.repo[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(
-                           self.repo, tr, phases.draft, [ctx.node()]
+                           self.repo, tr, phases.draft, [ctx.rev()]
                        )

            text = b"(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            man = self.repo.manifestlog.getstorage(b'')
            mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
            closed = b'close' in commit.extra
            if not closed and not man.cmp(m1node, man.revision(mnode)):
                self.ui.status(_(b"filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2
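putcommit() folds an N-parent source commit into a chain of two-parent merges: the `while parents` loop above commits one merge per pass, feeds the resulting node back in as p2, and relabels every pass after the first "(octopus merge fixup)". A schematic run for three parents (a, b, c are illustrative nodes; commit_merge is a hypothetical stand-in for the memctx/commitctx sequence above):

    parents = [a, b, c]
    p2 = parents.pop(0)            # p2 = a
    while parents:
        p1, p2 = p2, parents.pop(0)
        p2 = commit_merge(p1, p2)  # pass 1: merge(a, b) -> m1
                                   # pass 2: merge(m1, c) -> m2
    # the final p2 (m2 here) is what putcommit() returns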

    def puttags(self, tags):
        tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
        tagparent = tagparent or nodemod.nullid

        oldlines = set()
        for branch, heads in pycompat.iteritems(self.repo.branchmap()):
            for h in heads:
                if b'.hgtags' in self.repo[h]:
                    oldlines.update(
                        set(self.repo[h][b'.hgtags'].data().splitlines(True))
                    )
        oldlines = sorted(list(oldlines))

        newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None

        # if the old and new tags match, then there is nothing to update
        oldtags = set()
        newtags = set()
        for line in oldlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            oldtags.add(s[1])
        for line in newlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            if s[1] not in oldtags:
                newtags.add(s[1].strip())

        if not newtags:
            return None, None

        data = b"".join(newlines)

        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, memctx, f, data, False, False, None)

        self.ui.status(_(b"updating tags\n"))
        date = b"%d 0" % int(time.mktime(time.gmtime()))
        extra = {b'branch': self.tagsbranch}
        ctx = context.memctx(
            self.repo,
            (tagparent, None),
            b"update tags",
            [b".hgtags"],
            getfilectx,
            b"convert-repo",
            date,
            extra,
        )
        node = self.repo.commitctx(ctx)
        return nodemod.hex(node), nodemod.hex(tagparent)

    def setfilemapmode(self, active):
        self.filemapmode = active

    def putbookmarks(self, updatedbookmark):
        if not len(updatedbookmark):
            return
        wlock = lock = tr = None
        try:
            wlock = self.repo.wlock()
            lock = self.repo.lock()
            tr = self.repo.transaction(b'bookmark')
            self.ui.status(_(b"updating bookmarks\n"))
            destmarks = self.repo._bookmarks
            changes = [
                (bookmark, nodemod.bin(updatedbookmark[bookmark]))
                for bookmark in updatedbookmark
            ]
            destmarks.applychanges(self.repo, tr, changes)
            tr.close()
        finally:
            lockmod.release(lock, wlock, tr)

    def hascommitfrommap(self, rev):
        # the exact semantics of clonebranches is unclear so we can't say no
        return rev in self.repo or self.clonebranches

    def hascommitforsplicemap(self, rev):
        if rev not in self.repo and self.clonebranches:
            raise error.Abort(
                _(
                    b'revision %s not found in destination '
                    b'repository (lookups with clonebranches=true '
                    b'are not implemented)'
                )
                % rev
            )
        return rev in self.repo


class mercurial_source(common.converter_source):
    def __init__(self, ui, repotype, path, revs=None):
        common.converter_source.__init__(self, ui, repotype, path, revs)
        self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors')
        self.ignored = set()
        self.saverev = ui.configbool(b'convert', b'hg.saverev')
        try:
            self.repo = hg.repository(self.ui, path)
            # try to provoke an exception if this isn't really a hg
            # repo, but some other bogus compatible-looking url
            if not self.repo.local():
                raise error.RepoError
        except error.RepoError:
            ui.traceback()
            raise NoRepo(_(b"%s is not a local Mercurial repository") % path)
        self.lastrev = None
        self.lastctx = None
        self._changescache = None, None
        self.convertfp = None
        # Restrict converted revisions to startrev descendants
        startnode = ui.config(b'convert', b'hg.startrev')
        hgrevs = ui.config(b'convert', b'hg.revs')
        if hgrevs is None:
            if startnode is not None:
                try:
                    startnode = self.repo.lookup(startnode)
                except error.RepoError:
                    raise error.Abort(
                        _(b'%s is not a valid start revision') % startnode
                    )
                startrev = self.repo.changelog.rev(startnode)
                children = {startnode: 1}
                for r in self.repo.changelog.descendants([startrev]):
                    children[self.repo.changelog.node(r)] = 1
                self.keep = children.__contains__
            else:
                self.keep = util.always
            if revs:
                self._heads = [self.repo.lookup(r) for r in revs]
            else:
                self._heads = self.repo.heads()
        else:
            if revs or startnode is not None:
                raise error.Abort(
                    _(
                        b'hg.revs cannot be combined with '
                        b'hg.startrev or --rev'
                    )
                )
            nodes = set()
            parents = set()
            for r in scmutil.revrange(self.repo, [hgrevs]):
                ctx = self.repo[r]
                nodes.add(ctx.node())
                parents.update(p.node() for p in ctx.parents())
            self.keep = nodes.__contains__
            self._heads = nodes - parents
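With convert.hg.revs, the loop above records every node of the revset plus all parents it sees, and the heads fall out as a set difference: a member is a head unless another walked member names it as a parent. A toy run with illustrative string stand-ins for nodes:

    # chain a -> b -> c, plus a second head d whose parent is b
    nodes = {'a', 'b', 'c', 'd'}
    parents = {'a', 'b'}         # parents collected while walking the revset
    heads = nodes - parents      # {'c', 'd'}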

    def _changectx(self, rev):
        if self.lastrev != rev:
            self.lastctx = self.repo[rev]
            self.lastrev = rev
        return self.lastctx

    def _parents(self, ctx):
        return [p for p in ctx.parents() if p and self.keep(p.node())]

    def getheads(self):
        return [nodemod.hex(h) for h in self._heads if self.keep(h)]

    def getfile(self, name, rev):
        try:
            fctx = self._changectx(rev)[name]
            return fctx.data(), fctx.flags()
        except error.LookupError:
            return None, None

    def _changedfiles(self, ctx1, ctx2):
        ma, r = [], []
        maappend = ma.append
        rappend = r.append
        d = ctx1.manifest().diff(ctx2.manifest())
        for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d):
            if node2 is None:
                rappend(f)
            else:
                maappend(f)
        return ma, r

    def getchanges(self, rev, full):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if full or not parents:
            files = copyfiles = ctx.manifest()
        if parents:
            if self._changescache[0] == rev:
                ma, r = self._changescache[1]
            else:
                ma, r = self._changedfiles(parents[0], ctx)
            if not full:
                files = ma + r
                copyfiles = ma
        # _getcopies() is also run for roots and before filtering so missing
        # revlogs are detected early
        copies = self._getcopies(ctx, parents, copyfiles)
        cleanp2 = set()
        if len(parents) == 2:
            d = parents[1].manifest().diff(ctx.manifest(), clean=True)
            for f, value in pycompat.iteritems(d):
                if value is None:
                    cleanp2.add(f)
        changes = [(f, rev) for f in files if f not in self.ignored]
        changes.sort()
        return changes, copies, cleanp2

    def _getcopies(self, ctx, parents, files):
        copies = {}
        for name in files:
            if name in self.ignored:
                continue
            try:
                copysource = ctx.filectx(name).copysource()
                if copysource in self.ignored:
                    continue
                # Ignore copy sources not in parent revisions
                if not any(copysource in p for p in parents):
                    continue
                copies[name] = copysource
            except TypeError:
                pass
            except error.LookupError as e:
                if not self.ignoreerrors:
                    raise
                self.ignored.add(name)
                self.ui.warn(_(b'ignoring: %s\n') % e)
        return copies

    def getcommit(self, rev):
        ctx = self._changectx(rev)
        _parents = self._parents(ctx)
        parents = [p.hex() for p in _parents]
        optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
        crev = rev

        return common.commit(
            author=ctx.user(),
            date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'),
            desc=ctx.description(),
            rev=crev,
            parents=parents,
            optparents=optparents,
            branch=ctx.branch(),
            extra=ctx.extra(),
            sortkey=ctx.rev(),
            saverev=self.saverev,
            phase=ctx.phase(),
            ctx=ctx,
        )

    def numcommits(self):
        return len(self.repo)

    def gettags(self):
        # This will get written to .hgtags; filter non-global tags out.
        tags = [
            t
            for t in self.repo.tagslist()
            if self.repo.tagtype(t[0]) == b'global'
        ]
        return {
            name: nodemod.hex(node) for name, node in tags if self.keep(node)
        }

    def getchangedfiles(self, rev, i):
        ctx = self._changectx(rev)
        parents = self._parents(ctx)
        if not parents and i is None:
            i = 0
            ma, r = ctx.manifest().keys(), []
        else:
            i = i or 0
            ma, r = self._changedfiles(parents[i], ctx)
        ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]

        if i == 0:
            self._changescache = (rev, (ma, r))

        return ma + r

    def converted(self, rev, destrev):
        if self.convertfp is None:
            self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab')
        self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev)))
        self.convertfp.flush()

    def before(self):
        self.ui.debug(b'run hg source pre-conversion action\n')

    def after(self):
        self.ui.debug(b'run hg source post-conversion action\n')

    def hasnativeorder(self):
        return True

    def hasnativeclose(self):
        return True

    def lookuprev(self, rev):
        try:
            return nodemod.hex(self.repo.lookup(rev))
        except (error.RepoError, error.LookupError):
            return None

    def getbookmarks(self):
        return bookmarks.listbookmarks(self.repo)

    def checkrevformat(self, revstr, mapname=b'splicemap'):
        """Mercurial revision strings are 40-byte hex."""
        self.checkhexformat(revstr, mapname)
@@ -1,1703 +1,1703 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import open

from . import (
    error,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
    requirements,
    scmutil,
    util,
)

from .interfaces import repository

_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")

LFS_REQUIREMENT = b'lfs'

readexactly = util.readexactly


def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(b">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_(b"invalid chunk length %d") % l)
        return b""
    return readexactly(stream, l - 4)


def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(b">l", length + 4)


def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(b">l", 0)
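getchunk(), chunkheader() and closechunk() define the framing used throughout this file: a 4-byte big-endian length that counts itself, so a stored length of 0 (or anything else <= 4) carries no payload and acts as a terminator. A self-contained round trip using the same struct format:

    import struct

    payload = b'hello'
    # chunkheader(): the length field covers itself plus the payload
    frame = struct.pack(b">l", len(payload) + 4) + payload
    # getchunk(): read 4 length bytes, then length - 4 bytes of payload
    l = struct.unpack(b">l", frame[:4])[0]
    assert frame[4:l] == payload
    # closechunk(): a zero length, i.e. four NUL bytes, ends a group
    assert struct.pack(b">l", 0) == b"\x00\x00\x00\x00"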


def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    return chunkheader(len(path)) + path


def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, b"wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, b"wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg")
            fh = os.fdopen(fd, "wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)


class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """

    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'01'
    _grouplistcount = 1  # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = b'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_(b'unknown stream compression type: %s') % alg)
        if alg == b'BZ':
            alg = b'_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != b'UN'

    def read(self, l):
        return self._stream.read(l)

    def seek(self, pos):
        return self._stream.seek(pos)

    def tell(self):
        return self._stream.tell()

    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(b">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_(b"invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {b'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)
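For cg1, each delta is prefixed by four raw 20-byte nodes (node, p1, p2, cs), and the delta base is implicit, as _deltaheader() above shows: the previously received node, or p1 for the first delta. A minimal standalone parser mirroring that logic:

    import struct

    CG1_HEADER = struct.Struct(b"20s20s20s20s")  # same layout as above

    def parse_cg1_delta_header(headerdata, prevnode):
        # mirrors cg1unpacker._deltaheader(): deltabase falls back to p1
        node, p1, p2, cs = CG1_HEADER.unpack(headerdata)
        deltabase = p1 if prevnode is None else prevnode
        return node, p1, p2, deltabase, cs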
206
206
207 def getchunks(self):
207 def getchunks(self):
208 """returns all the chunks contains in the bundle
208 """returns all the chunks contains in the bundle
209
209
210 Used when you need to forward the binary stream to a file or another
210 Used when you need to forward the binary stream to a file or another
211 network API. To do so, it parse the changegroup data, otherwise it will
211 network API. To do so, it parse the changegroup data, otherwise it will
212 block in case of sshrepo because it don't know the end of the stream.
212 block in case of sshrepo because it don't know the end of the stream.
213 """
213 """
214 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
214 # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
215 # and a list of filelogs. For changegroup 3, we expect 4 parts:
215 # and a list of filelogs. For changegroup 3, we expect 4 parts:
216 # changelog, manifestlog, a list of tree manifestlogs, and a list of
216 # changelog, manifestlog, a list of tree manifestlogs, and a list of
217 # filelogs.
217 # filelogs.
218 #
218 #
219 # Changelog and manifestlog parts are terminated with empty chunks. The
219 # Changelog and manifestlog parts are terminated with empty chunks. The
220 # tree and file parts are a list of entry sections. Each entry section
220 # tree and file parts are a list of entry sections. Each entry section
221 # is a series of chunks terminating in an empty chunk. The list of these
221 # is a series of chunks terminating in an empty chunk. The list of these
222 # entry sections is terminated in yet another empty chunk, so we know
222 # entry sections is terminated in yet another empty chunk, so we know
223 # we've reached the end of the tree/file list when we reach an empty
223 # we've reached the end of the tree/file list when we reach an empty
224 # chunk that was proceeded by no non-empty chunks.
224 # chunk that was proceeded by no non-empty chunks.
225
225
226 parts = 0
226 parts = 0
227 while parts < 2 + self._grouplistcount:
227 while parts < 2 + self._grouplistcount:
228 noentries = True
228 noentries = True
229 while True:
229 while True:
230 chunk = getchunk(self)
230 chunk = getchunk(self)
231 if not chunk:
231 if not chunk:
232 # The first two empty chunks represent the end of the
232 # The first two empty chunks represent the end of the
233 # changelog and the manifestlog portions. The remaining
233 # changelog and the manifestlog portions. The remaining
234 # empty chunks represent either A) the end of individual
234 # empty chunks represent either A) the end of individual
235 # tree or file entries in the file list, or B) the end of
235 # tree or file entries in the file list, or B) the end of
236 # the entire list. It's the end of the entire list if there
236 # the entire list. It's the end of the entire list if there
237 # were no entries (i.e. noentries is True).
237 # were no entries (i.e. noentries is True).
238 if parts < 2:
238 if parts < 2:
239 parts += 1
239 parts += 1
240 elif noentries:
240 elif noentries:
241 parts += 1
241 parts += 1
242 break
242 break
243 noentries = False
243 noentries = False
244 yield chunkheader(len(chunk))
244 yield chunkheader(len(chunk))
245 pos = 0
245 pos = 0
246 while pos < len(chunk):
246 while pos < len(chunk):
247 next = pos + 2 ** 20
247 next = pos + 2 ** 20
248 yield chunk[pos:next]
248 yield chunk[pos:next]
249 pos = next
249 pos = next
250 yield closechunk()
250 yield closechunk()
251
251
    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(
        self,
        repo,
        tr,
        srctype,
        url,
        targetphase=phases.draft,
        expectedtotal=None,
    ):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()

        def csmap(x):
            repo.ui.debug(b"add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        try:
            # The transaction may already carry source information. In this
            # case we use the top-level data. We overwrite the arguments
            # because we need to use the top-level values (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault(b'source', srctype)
            tr.hookargs.setdefault(b'url', url)
            repo.hook(
                b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_(b"adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(
                _(b'changesets'), unit=_(b'chunks'), total=expectedtotal
            )
            self.callback = progress.increment

            efilesset = set()
            cgnodes = []

            def ondupchangelog(cl, node):
                if cl.rev(node) < clstart:
                    cgnodes.append(node)

            def onchangelog(cl, node):
                efilesset.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            if not cl.addgroup(
                deltas,
                csmap,
                trp,
                addrevisioncb=onchangelog,
                duplicaterevisioncb=ondupchangelog,
            ):
                repo.ui.develwarn(
                    b'applied empty changelog from changegroup',
                    config=b'warn-empty-changegroup',
                )
            efiles = len(efilesset)
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            del deltas
            # TODO Python 2.7 removal
            # del efilesset
            efilesset = None
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_(b"adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(
                _(b'manifests'), unit=_(b'chunks'), total=changesets
            )
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool(b'server', b'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate that incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file nodes we must see
                    for f, n in pycompat.iteritems(mfest):
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_(b"adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles
            )

            # making sure the values exist
            tr.changes.setdefault(b'changegroup-count-changesets', 0)
            tr.changes.setdefault(b'changegroup-count-revisions', 0)
            tr.changes.setdefault(b'changegroup-count-files', 0)
            tr.changes.setdefault(b'changegroup-count-heads', 0)

            # Some code uses bundle operations for internal purposes. It
            # usually sets `ui.quiet` to do this outside of the user's sight.
            # Since the report of such operations now happens at the end of
            # the transaction, `ui.quiet` has no direct effect on that output.
            #
            # To preserve this intent, we use an inelegant hack: we fail to
            # report the change if `quiet` is set. We should probably move to
            # something better, but this is a good first step to allow the
            # "end of transaction report" to pass tests.
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-changesets'] += changesets
                tr.changes[b'changegroup-count-revisions'] += newrevs
                tr.changes[b'changegroup-count-files'] += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads += len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1

            # see previous comment about checking ui.quiet
            if not repo.ui.quiet:
                tr.changes[b'changegroup-count-heads'] += deltaheads
            repo.invalidatevolatilesets()

            if changesets > 0:
                if b'node' not in tr.hookargs:
                    tr.hookargs[b'node'] = hex(cl.node(clstart))
                    tr.hookargs[b'node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs[b'node'] = hex(cl.node(clstart))
                    hookargs[b'node_last'] = hex(cl.node(clend - 1))
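                # (Note on the two branches above: if the transaction did not
                # yet carry a node, the node range is recorded on tr.hookargs
                # itself so later hooks see it; otherwise only the local copy
                # used for this hook invocation is overridden.)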
                repo.hook(
                    b'pretxnchangegroup',
                    throw=True,
                    **pycompat.strkwargs(hookargs)
                )

            added = pycompat.xrange(clstart, clend)
            phaseall = None
            if srctype in (b'push', b'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # exists locally as secret.
                #
                # We should not use `added` here but the list of all changes
                # in the bundle.
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
-                phases.registernew(repo, tr, targetphase, [], revs=added)
+                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes, revs=added)
                cgnodes = []

            if changesets > 0:

                def runhooks(unused_success):
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook(b"changegroup", **pycompat.strkwargs(hookargs))

                    for rev in added:
                        args = hookargs.copy()
                        args[b'node'] = hex(cl.node(rev))
                        del args[b'node_last']
                        repo.hook(b"incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads() if h not in oldheads]
                    repo.ui.log(
                        b"incoming",
                        b"%d incoming changes - new heads: %s\n",
                        len(added),
                        b', '.join([hex(c[:6]) for c in newheads]),
                    )

                tr.addpostclose(
                    b'changegroup-runhooks-%020i' % clstart,
                    lambda tr: repo._afterlock(runhooks),
                )
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
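            # (node: revision id; p1/p2: parent ids; cs: the changeset id the
            # revision links back to; deltabase: the revision this delta
            # applies against; delta: the binary patch; flags: revlog flags.)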
            yield chunkdata
            chain = chunkdata[0]


class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """

    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags


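# For reference, derived from the delta header structs each unpacker uses:
# cg1 headers carry (node, p1, p2, cs) and take the previous chunk's node as
# the delta base; cg2 adds an explicit deltabase field; cg3 (below) appends a
# 16-bit flags field on top of that.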
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """

    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = b'03'
    _grouplistcount = 2  # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata[b"filename"]
            repo.ui.debug(b"adding %s revisions\n" % d)
            deltas = self.deltaiter()
            if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp):
                raise error.Abort(_(b"received dir revlog group is empty"))


class headerlessfixup(object):
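    # Wraps a stream whose leading bytes were already consumed elsewhere
    # (e.g. while sniffing the changegroup version): read() replays the saved
    # header bytes `h` before falling through to the underlying file `fh`.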
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)


def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

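    # (Both invented headers below are plain bdiff hunks:
    # trivialdiffheader(n) replaces the empty range [0, 0) with n bytes,
    # i.e. a delta against nothing, while replacediffheader(oldlen, n)
    # replaces the whole base revision [0, oldlen) with the new text.)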
    if delta.delta is not None:
        prefix, data = b'', delta.delta
    elif delta.basenode == nullid:
        data = delta.revision
        prefix = mdiff.trivialdiffheader(len(data))
    else:
        data = delta.revision
        prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:
        yield prefix
    yield data


def _sortnodesellipsis(store, nodes, cl, lookup):
    """Sort nodes for changegroup generation."""
    # Ellipses serving mode.
    #
    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    key = lambda n: cl.rev(lookup(n))
    return sorted(nodes, key=key)


def _resolvenarrowrevisioninfo(
    cl,
    store,
    ischangelog,
    rev,
    linkrev,
    linknode,
    clrevtolocalrev,
    fullclnodes,
    precomputedellipsis,
):
    linkparents = precomputedellipsis[linkrev]

    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend(
                    [pp for pp in precomputedellipsis[p] if pp != nullrev]
                )
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    b'unable to resolve parent while packing %r %r'
                    b' for changeset %r' % (store.indexfile, rev, clrev)
                )

        return nullrev

    if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        (p1,) = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode


def deltagroup(
    repo,
    store,
    nodes,
    ischangelog,
    lookup,
    forcedeltaparentprev,
    topic=None,
    ellipses=False,
    clrevtolocalrev=None,
    fullclnodes=None,
    precomputedellipsis=None,
):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = b'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = b'nodes'
    else:
        nodesorder = None
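    # (nodesorder is interpreted by emitrevisions() below: b'storage'
    # requests storage order, b'nodes' requests exactly the order of `nodes`
    # as given, and None lets the store use its default ordering.)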

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl,
                    store,
                    ischangelog,
                    rev,
                    linkrev,
                    linknode,
                    clrevtolocalrev,
                    fullclnodes,
                    precomputedellipsis,
                )

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(
            topic, unit=_(b'chunks'), total=len(nodes)
        )

    configtarget = repo.ui.config(b'devel', b'bundle.delta')
    if configtarget not in (b'', b'p1', b'full'):
        msg = _("""config "devel.bundle.delta" has an unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == b'p1':
        deltamode = repository.CG_DELTAMODE_P1
    elif configtarget == b'full':
        deltamode = repository.CG_DELTAMODE_FULL

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode,
    )

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()


class cgpacker(object):
    def __init__(
        self,
        repo,
        oldmatcher,
        matcher,
        version,
        builddeltaheader,
        manifestsend,
        forcedeltaparentprev=False,
        bundlecaps=None,
        ellipses=False,
        shallow=False,
        ellipsisroots=None,
        fullnodes=None,
    ):
        """Given a source repo, construct a bundler.

        oldmatcher is a matcher that matches on files the client already has.
        These will not be included in the changegroup.

        matcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.
        """
        assert oldmatcher
        assert matcher
        self._oldmatcher = oldmatcher
        self._matcher = matcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        self._repo = repo

        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def generate(
        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
    ):
        """Yield a sequence of changegroup byte chunks.

        If changelog is False, changelog data won't be added to the
        changegroup.
        """

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_(b'uncompressed size of bundle content:\n'))
        size = 0

        clstate, deltas = self._generatechangelog(
            cl, clnodes, generate=changelog
        )
        for delta in deltas:
            for chunk in _revisiondeltatochunks(delta, self._builddeltaheader):
                size += len(chunk)
                yield chunk

        close = closechunk()
        size += len(close)
        yield close

        self._verbosenote(_(b'%8.i (changelog)\n') % size)

        clrevorder = clstate[b'clrevorder']
        manifests = clstate[b'manifests']
        changedfiles = clstate[b'changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath when the manifest revlog uses generaldelta,
        # the manifest may be walked in the "wrong" order. Without 'clrevorder',
        # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta and is
        # never reordered. To handle this case, we simply take the slowpath,
        # which already has the 'clrevorder' logic. This was also fixed in
        # cc0ff93d0c0c.

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo)

        fnodes = {}  # needed file nodes

        size = 0
        it = self.generatemanifests(
            commonrevs,
            clrevorder,
            fastpathlinkrev,
            manifests,
            fnodes,
            source,
            clstate[b'clrevtomanifestrev'],
        )

        for tree, deltas in it:
            if tree:
                assert self.version == b'03'
                chunk = _fileheader(tree)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_(b'%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [
                (self._repo.manifestlog[n].read(), lr)
                for (n, lr) in pycompat.iteritems(manifests)
            ]

        manifests.clear()
        clrevs = {cl.rev(x) for x in clnodes}

        it = self.generatefiles(
            changedfiles,
            commonrevs,
            source,
            mfdicts,
            fastpathlinkrev,
            fnodes,
            clrevs,
        )

        for path, deltas in it:
            h = _fileheader(path)
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_(b'%8.i %s\n') % (size, path))

        yield closechunk()

        if clnodes:
            repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)

    def _generatechangelog(self, cl, nodes, generate=True):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.

        If generate is False, the state will be fully populated and no chunk
        stream will be yielded.
        """
        clrevorder = {}
        manifests = {}
        mfl = self._repo.manifestlog
        changedfiles = set()
        clrevtomanifestrev = {}

        state = {
            b'clrevorder': clrevorder,
            b'manifests': manifests,
            b'changedfiles': changedfiles,
            b'clrevtomanifestrev': clrevtomanifestrev,
        }

        if not (generate or self._ellipses):
            # sort the nodes in storage order
            nodes = sorted(nodes, key=cl.rev)
            for node in nodes:
                c = cl.changelogrevision(node)
                clrevorder[node] = len(clrevorder)
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, node)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return state, ()

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.changelogrevision(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update manifests if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (
                    x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis
                ):

                    manifestnode = c.manifest
                    # Record the first changeset introducing this manifest
                    # version.
                    manifests.setdefault(manifestnode, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(
                        cl.rev(x), mfl.rev(manifestnode)
                    )
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c.manifest].read().keys())
                else:
                    changedfiles.update(c.files)
            else:
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return x

        gen = deltagroup(
            self._repo,
            cl,
            nodes,
            True,
            lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_(b'changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis,
        )

        return state, gen

    def generatemanifests(
        self,
        commonrevs,
        clrevorder,
        fastpathlinkrev,
        manifests,
        fnodes,
        source,
        clrevtolocalrev,
    ):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog
        to change what is sent based on pulls vs pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        tmfnodes = {b'': manifests}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(tree, nodes):
            if fastpathlinkrev:
                assert not tree
                return (
                    manifests.__getitem__
                )  # pytype: disable=unsupported-operands

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(tree, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == b't':  # subdirectory manifest
                        subtree = tree + p + b'/'
                        tmfclnodes = tmfnodes.setdefault(subtree, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = tree + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode

            return lookupmflinknode

        while tmfnodes:
            tree, nodes = tmfnodes.popitem()

            should_visit = self._matcher.visitdir(tree[:-1])
            if tree and not should_visit:
                continue

            store = mfl.getstorage(tree)

            if not should_visit:
                # No nodes to send because this directory is out of
                # the client's view of the repository (probably
                # because of narrow clones). Do this even for the root
                # directory (tree=='')
                prunednodes = []
            else:
                # Avoid sending any manifest nodes we can prove the
                # client already has by checking linkrevs. See the
                # related comment in generatefiles().
                prunednodes = self._prunemanifests(store, nodes, commonrevs)

            if tree and not prunednodes:
                continue

            lookupfn = makelookupmflinknode(tree, nodes)

            deltas = deltagroup(
                self._repo,
                store,
                prunednodes,
                False,
                lookupfn,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                topic=_(b'manifests'),
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis,
            )

            if not self._oldmatcher.visitdir(store.tree[:-1]):
                yield tree, deltas
            else:
                # 'deltas' is a generator and we need to consume it even if
                # we are not going to send it because a side-effect is that
                # it updates tmfnodes (via lookupfn)
                for d in deltas:
                    pass
                if not tree:
                    yield tree, []

    def _prunemanifests(self, store, nodes, commonrevs):
        if not self._ellipses:
            # In the non-ellipses case, on large repositories it is better to
            # avoid calling store.rev and store.linkrev on a lot of nodes
            # than to send some extra data
            return nodes.copy()
        # This is split out as a separate method to allow filtering
        # commonrevs in extension code.
        #
        # TODO(augie): this shouldn't be required, instead we should
        # make filtering of revisions to send delegated to the store
        # layer.
        frev, flr = store.rev, store.linkrev
        return [n for n in nodes if flr(frev(n)) not in commonrevs]

    # The 'source' parameter is useful for extensions
    def generatefiles(
        self,
        changedfiles,
        commonrevs,
        source,
        mfdicts,
        fastpathlinkrev,
        fnodes,
        clrevs,
    ):
        changedfiles = [
            f
            for f in changedfiles
            if self._matcher(f) and not self._oldmatcher(f)
        ]

        if not fastpathlinkrev:

            def normallinknodes(unused, fname):
                return fnodes.get(fname, {})

        else:
            cln = self._repo.changelog.node

            def normallinknodes(store, fname):
                flinkrev = store.linkrev
                fnode = store.node
                revs = ((r, flinkrev(r)) for r in store)
                return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs}

        clrevtolocalrev = {}

        if self._isshallow:
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            clrev = self._repo.changelog.rev

            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        clrevtolocalrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = normallinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links

        else:
            linknodes = normallinknodes

        repo = self._repo
        progress = repo.ui.makeprogress(
            _(b'files'), unit=_(b'files'), total=len(changedfiles)
        )
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(
                    _(b"empty or missing file data for %s") % fname
                )

            clrevtolocalrev.clear()

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            frev, flr = filerevlog.rev, filerevlog.linkrev
            # Skip sending any filenode we know the client already
            # has. This avoids over-sending files relatively
            # inexpensively, so it's not a problem if we under-filter
            # here.
            filenodes = [
                n for n in linkrevnodes if flr(frev(n)) not in commonrevs
            ]

            if not filenodes:
                continue

            progress.update(i + 1, item=fname)

            deltas = deltagroup(
                self._repo,
                filerevlog,
                filenodes,
                False,
                lookupfilelog,
                self._forcedeltaparentprev,
                ellipses=self._ellipses,
                clrevtolocalrev=clrevtolocalrev,
                fullclnodes=self._fullclnodes,
                precomputedellipsis=self._precomputedellipsis,
            )

            yield fname, deltas

        progress.complete()


def _makecg1packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.linknode
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'01',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        forcedeltaparentprev=True,
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


def _makecg2packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'02',
        builddeltaheader=builddeltaheader,
        manifestsend=b'',
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


def _makecg3packer(
    repo,
    oldmatcher,
    matcher,
    bundlecaps,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
        d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
    )

    return cgpacker(
        repo,
        oldmatcher,
        matcher,
        b'03',
        builddeltaheader=builddeltaheader,
        manifestsend=closechunk(),
        bundlecaps=bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


_packermap = {
    b'01': (_makecg1packer, cg1unpacker),
    # cg2 adds support for exchanging generaldelta
    b'02': (_makecg2packer, cg2unpacker),
    # cg3 adds support for exchanging revlog flags and treemanifests
    b'03': (_makecg3packer, cg3unpacker),
}


def allsupportedversions(repo):
    versions = set(_packermap.keys())
    needv03 = False
    if (
        repo.ui.configbool(b'experimental', b'changegroup3')
        or repo.ui.configbool(b'experimental', b'treemanifest')
        or scmutil.istreemanifest(repo)
    ):
        # we keep version 03 because we need it to exchange treemanifest data
        #
        # we also keep versions 01 and 02, because it is possible for a repo
        # to contain both normal and tree manifests at the same time, so using
        # an older version to pull data is viable
        #
        # (or even to push a subset of history)
        needv03 = True
    if b'exp-sidedata-flag' in repo.requirements:
        needv03 = True
        # don't attempt to use 01/02 until we do sidedata cleaning
        versions.discard(b'01')
        versions.discard(b'02')
    if not needv03:
        versions.discard(b'03')
    return versions


# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)


# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if scmutil.istreemanifest(repo):
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard(b'01')
        versions.discard(b'02')
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard(b'01')
        versions.discard(b'02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard(b'01')
        versions.discard(b'02')

    return versions


def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))


def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if b'generaldelta' in repo.requirements:
        versions.discard(b'01')
    assert versions
    return min(versions)
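

# A sketch for orientation (editor's note, not part of upstream): on a
# repository whose requirements include 'generaldelta' but no treemanifests,
# safeversion() typically returns b'02'; once treemanifests are in use,
# supportedoutgoingversions() leaves only b'03', which is then returned.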


def getbundler(
    version,
    repo,
    bundlecaps=None,
    oldmatcher=None,
    matcher=None,
    ellipses=False,
    shallow=False,
    ellipsisroots=None,
    fullnodes=None,
):
    assert version in supportedoutgoingversions(repo)

    if matcher is None:
        matcher = matchmod.always()
    if oldmatcher is None:
        oldmatcher = matchmod.never()

    if version == b'01' and not matcher.always():
        raise error.ProgrammingError(
            b'version 01 changegroups do not support sparse file matchers'
        )

    if ellipses and version in (b'01', b'02'):
        raise error.Abort(
            _(
                b'ellipsis nodes require at least cg3 on client and server, '
                b'but negotiated version %s'
            )
            % version
        )

    # Requested files could include files not in the local store. So
    # filter those out.
    matcher = repo.narrowmatch(matcher)

    fn = _packermap[version][0]
    return fn(
        repo,
        oldmatcher,
        matcher,
        bundlecaps,
        ellipses=ellipses,
        shallow=shallow,
        ellipsisroots=ellipsisroots,
        fullnodes=fullnodes,
    )


def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)
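

# A minimal round-trip sketch (hypothetical usage, not upstream code): the
# packer and unpacker halves of _packermap are symmetric, so a stream produced
# by one side can be consumed by the other, e.g.
#
#     bundler = getbundler(b'02', repo)
#     stream = bundler.generate(commonrevs, missingnodes, False, b'push')
#     cg = getunbundler(b'02', util.chunkbuffer(stream), None)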


def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == b'bundle':
        repo.ui.status(_(b"%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug(b"list of changesets:\n")
        for node in nodes:
            repo.ui.debug(b"%s\n" % hex(node))


def makechangegroup(
    repo, outgoing, version, source, fastpath=False, bundlecaps=None
):
    cgstream = makestream(
        repo,
        outgoing,
        version,
        source,
        fastpath=fastpath,
        bundlecaps=bundlecaps,
    )
    return getunbundler(
        version,
        util.chunkbuffer(cgstream),
        None,
        {b'clcount': len(outgoing.missing)},
    )
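

# Hypothetical usage sketch (assuming the discovery module's outgoing class;
# not upstream code): bundling everything reachable from the current heads
# against an empty common set, roughly what a full `hg bundle` ends up doing.
#
#     out = discovery.outgoing(repo, commonheads=[], ancestorsof=repo.heads())
#     cg = makechangegroup(repo, out, b'02', b'bundle')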


def makestream(
    repo,
    outgoing,
    version,
    source,
    fastpath=False,
    bundlecaps=None,
    matcher=None,
):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.ancestorsof
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads())
    )

    repo.hook(b'preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)


def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(
        _(b'files'), unit=_(b'files'), total=expectedfiles
    )
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata[b"filename"]
        repo.ui.debug(b"adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_(b"received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_(b"received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(_(b"received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in pycompat.iteritems(needfiles):
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _(b'missing file data for %s:%s - run hg verify')
                    % (f, hex(n))
                )

    return revisions, files
@@ -1,475 +1,475 @@
# commit.py - function to perform commit
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
)

from . import (
    context,
    mergestate,
    metadata,
    phases,
    scmutil,
    subrepoutil,
)


def _write_copy_meta(repo):
    """return a (changelog, filelog) boolean tuple

    changelog: copy related information should be stored in the changeset
    filelog: copy related information should be written in the file revision
    """
    if repo.filecopiesmode == b'changeset-sidedata':
        writechangesetcopy = True
        writefilecopymeta = True
    else:
        writecopiesto = repo.ui.config(b'experimental', b'copies.write-to')
        writefilecopymeta = writecopiesto != b'changeset-only'
        writechangesetcopy = writecopiesto in (
            b'changeset-only',
            b'compatibility',
        )
    return writechangesetcopy, writefilecopymeta
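

# For reference, a sketch of the mapping implemented above (editor's note;
# values assume the default 'filelog-only' for experimental.copies.write-to):
#
#     write-to value   -> (writechangesetcopy, writefilecopymeta)
#     'changeset-only' -> (True, False)
#     'compatibility'  -> (True, True)
#     'filelog-only'   -> (False, True)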


def commitctx(repo, ctx, error=False, origctx=None):
    """Add a new revision to the target repository.
    Revision information is passed via the context argument.

    ctx.files() should list all files involved in this commit, i.e.
    modified/added/removed files. On merge, it may be wider than the
    ctx.files() to be committed, since any file nodes derived directly
    from p1 or p2 are excluded from the committed ctx.files().

    origctx is for convert to work around the problem that bug
    fixes to the files list in changesets change hashes. For
    convert to be the identity, it can pass an origctx and this
    function will use the same files list when it makes sense to
    do so.
    """
    repo = repo.unfiltered()

    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    with repo.lock(), repo.transaction(b"commit") as tr:
        mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx)

        extra = ctx.extra().copy()

        if extra is not None:
            for name in (
                b'p1copies',
                b'p2copies',
                b'filesadded',
                b'filesremoved',
            ):
                extra.pop(name, None)
        if repo.changelog._copiesstorage == b'extra':
            extra = _extra_with_copies(repo, extra, files)

        # update changelog
        repo.ui.note(_(b"committing changelog\n"))
        repo.changelog.delayupdate(tr)
        n = repo.changelog.add(
            mn,
            files,
            ctx.description(),
            tr,
            p1.node(),
            p2.node(),
            user,
            ctx.date(),
            extra,
        )
        xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
        repo.hook(
            b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2,
        )
        # set the new commit in its proper phase
        targetphase = subrepoutil.newcommitphase(repo.ui, ctx)
        if targetphase:
            # retracting the boundary does not alter parent changesets;
            # if a parent has a higher phase, the resulting phase will
            # be compliant anyway
            #
            # if the minimal phase was 0 we don't need to retract anything
-            phases.registernew(repo, tr, targetphase, [n])
+            phases.registernew(repo, tr, targetphase, [repo[n].rev()])
        return n
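# Note on the changed call above: this changeset switches
# phases.registernew() callers over to revision sets, so the new revision is
# now identified by its revision number (repo[n].rev()) rather than its node.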


def _prepare_files(tr, ctx, error=False, origctx=None):
    repo = ctx.repo()
    p1 = ctx.p1()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)
    files = metadata.ChangingFiles()
    ms = mergestate.mergestate.read(repo)
    salvaged = _get_salvaged(repo, ms, ctx)
    for s in salvaged:
        files.mark_salvaged(s)

    if ctx.manifestnode():
        # reuse an existing manifest revision
        repo.ui.debug(b'reusing known manifest\n')
        mn = ctx.manifestnode()
        files.update_touched(ctx.files())
        if writechangesetcopy:
            files.update_added(ctx.filesadded())
            files.update_removed(ctx.filesremoved())
    elif not ctx.files():
        repo.ui.debug(b'reusing manifest from p1 (no file change)\n')
        mn = p1.manifestnode()
    else:
        mn = _process_files(tr, ctx, ms, files, error=error)

    if origctx and origctx.manifestnode() == mn:
        origfiles = origctx.files()
        assert files.touched.issubset(origfiles)
        files.update_touched(origfiles)

    if writechangesetcopy:
        files.update_copies_from_p1(ctx.p1copies())
        files.update_copies_from_p2(ctx.p2copies())

    return mn, files


def _get_salvaged(repo, ms, ctx):
    """returns a list of salvaged files

    returns an empty list if the config option which processes salvaged
    files is not enabled"""
    salvaged = []
    copy_sd = repo.filecopiesmode == b'changeset-sidedata'
    if copy_sd and len(ctx.parents()) > 1:
        if ms.active():
            for fname in sorted(ms.allextras().keys()):
                might_removed = ms.extras(fname).get(b'merge-removal-candidate')
                if might_removed == b'yes':
                    if fname in ctx:
                        salvaged.append(fname)
    return salvaged


def _process_files(tr, ctx, ms, files, error=False):
    repo = ctx.repo()
    p1 = ctx.p1()
    p2 = ctx.p2()

    writechangesetcopy, writefilecopymeta = _write_copy_meta(repo)

    m1ctx = p1.manifestctx()
    m2ctx = p2.manifestctx()
    mctx = m1ctx.copy()

    m = mctx.read()
    m1 = m1ctx.read()
    m2 = m2ctx.read()

    # check in files
    added = []
    removed = list(ctx.removed())
    linkrev = len(repo)
    repo.ui.note(_(b"committing files:\n"))
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(ctx.modified() + ctx.added()):
        repo.ui.note(uipathfn(f) + b"\n")
        try:
            fctx = ctx[f]
            if fctx is None:
                removed.append(f)
            else:
                added.append(f)
                m[f], is_touched = _filecommit(
                    repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms
                )
                if is_touched:
                    if is_touched == 'added':
                        files.mark_added(f)
                    elif is_touched == 'merged':
                        files.mark_merged(f)
                    else:
                        files.mark_touched(f)
                m.setflag(f, fctx.flags())
        except OSError:
            repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise
        except IOError as inst:
            errcode = getattr(inst, 'errno', errno.ENOENT)
            if error or errcode and errcode != errno.ENOENT:
                repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f))
            raise

    # update manifest
    removed = [f for f in removed if f in m1 or f in m2]
    drop = sorted([f for f in removed if f in m])
    for f in drop:
        del m[f]
    if p2.rev() == nullrev:
        files.update_removed(removed)
    else:
        rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
        for f in removed:
            if not rf(f):
                files.mark_removed(f)

    mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop)

    return mn


def _filecommit(
    repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms,
):
    """
    commit an individual file as part of a larger transaction

    input:

    fctx: a file context with the content we are trying to commit
    manifest1: manifest of changeset first parent
    manifest2: manifest of changeset second parent
    linkrev: revision number of the changeset being created
    tr: current transaction
    includecopymeta: boolean, set to False to skip storing the copy data
                     (only used by the Google specific feature of using
                     changeset extra as copy source of truth).
    ms: mergestate object

    output: (filenode, touched)

    filenode: the filenode that should be used by this changeset
    touched: one of: None (meaning untouched), 'added', 'modified' or 'merged'
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    touched = None
    if fparent1 == fparent2 == nullid:
        touched = 'added'

    if isinstance(fctx, context.filectx):
        # This block fast-paths the most common comparisons. It assumes that a
        # bare filectx is used and no merge happened, hence no need to create
        # a new file revision in this case.
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            repo.ui.debug(b'reusing %s filelog entry\n' % fname)
            if (
                fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
            ) or (
                fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
            ):
                touched = 'modified'
            return node, touched

    flog = repo.file(fname)
    meta = {}
    cfname = fctx.copysource()
    fnode = None

    if cfname and cfname != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4   as the merge base
        #

        cnode = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2:  # branch merge
            if fparent2 == nullid or cnode is None:  # copied on remote side
                if cfname in manifest2:
                    cnode = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if cnode:
            repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
            if includecopymeta:
                meta[b"copy"] = cfname
                meta[b"copyrev"] = hex(cnode)
            fparent1, fparent2 = nullid, newfparent
        else:
            repo.ui.warn(
                _(
                    b"warning: can't find ancestor for '%s' "
                    b"copied from '%s'!\n"
                )
                % (fname, cfname)
            )

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid
        elif not fparentancestors:
            # TODO: this whole if-else might be simplified much more
            if (
                ms.active()
                and ms.extras(fname).get(b'filenode-source') == b'other'
            ):
                fparent1, fparent2 = fparent2, nullid

    force_new_node = False
    # The file might have been deleted by merge code and the user explicitly
    # chose to revert the file and keep it. The other case can be where there
    # is a change-delete or delete-change conflict and the user explicitly
    # chose to keep the file. The goal is to create a new filenode for the
    # user's explicit choices.
    if (
        repo.ui.configbool(b'experimental', b'merge-track-salvaged')
        and ms.active()
        and ms.extras(fname).get(b'merge-removal-candidate') == b'yes'
    ):
        force_new_node = True
    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
        if touched is None:  # do not overwrite added
            if fparent2 == nullid:
                touched = 'modified'
            else:
                touched = 'merged'
        fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        touched = 'modified'
        fnode = fparent1
    else:
        fnode = fparent1
    return fnode, touched
381
381
382
382
383 def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
383 def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop):
384 """make a new manifest entry (or reuse a new one)
384 """make a new manifest entry (or reuse a new one)
385
385
386 given an initialised manifest context and precomputed list of
386 given an initialised manifest context and precomputed list of
387 - files: files affected by the commit
387 - files: files affected by the commit
388 - added: new entries in the manifest
388 - added: new entries in the manifest
389 - drop: entries present in parents but absent of this one
389 - drop: entries present in parents but absent of this one
390
390
391 Create a new manifest revision, reuse existing ones if possible.
391 Create a new manifest revision, reuse existing ones if possible.
392
392
393 Return the nodeid of the manifest revision.
393 Return the nodeid of the manifest revision.
394 """
394 """
395 repo = ctx.repo()
395 repo = ctx.repo()
396
396
397 md = None
397 md = None
398
398
399 # all this is cached, so it is find to get them all from the ctx.
399 # all this is cached, so it is find to get them all from the ctx.
400 p1 = ctx.p1()
400 p1 = ctx.p1()
401 p2 = ctx.p2()
401 p2 = ctx.p2()
402 m1ctx = p1.manifestctx()
402 m1ctx = p1.manifestctx()
403
403
404 m1 = m1ctx.read()
404 m1 = m1ctx.read()
405
405
406 if not files:
406 if not files:
407 # if no "files" actually changed in terms of the changelog,
407 # if no "files" actually changed in terms of the changelog,
408 # try hard to detect unmodified manifest entry so that the
408 # try hard to detect unmodified manifest entry so that the
409 # exact same commit can be reproduced later on convert.
409 # exact same commit can be reproduced later on convert.
410 md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
410 md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files()))
411 if not files and md:
411 if not files and md:
412 repo.ui.debug(
412 repo.ui.debug(
413 b'not reusing manifest (no file change in '
413 b'not reusing manifest (no file change in '
414 b'changelog, but manifest differs)\n'
414 b'changelog, but manifest differs)\n'
415 )
415 )
416 if files or md:
416 if files or md:
417 repo.ui.note(_(b"committing manifest\n"))
417 repo.ui.note(_(b"committing manifest\n"))
418 # we're using narrowmatch here since it's already applied at
418 # we're using narrowmatch here since it's already applied at
419 # other stages (such as dirstate.walk), so we're already
419 # other stages (such as dirstate.walk), so we're already
420 # ignoring things outside of narrowspec in most cases. The
420 # ignoring things outside of narrowspec in most cases. The
421 # one case where we might have files outside the narrowspec
421 # one case where we might have files outside the narrowspec
422 # at this point is merges, and we already error out in the
422 # at this point is merges, and we already error out in the
423 # case where the merge has files outside of the narrowspec,
423 # case where the merge has files outside of the narrowspec,
424 # so this is safe.
424 # so this is safe.
425 mn = mctx.write(
425 mn = mctx.write(
426 tr,
426 tr,
427 linkrev,
427 linkrev,
428 p1.manifestnode(),
428 p1.manifestnode(),
429 p2.manifestnode(),
429 p2.manifestnode(),
430 added,
430 added,
431 drop,
431 drop,
432 match=repo.narrowmatch(),
432 match=repo.narrowmatch(),
433 )
433 )
434 else:
434 else:
435 repo.ui.debug(
435 repo.ui.debug(
436 b'reusing manifest from p1 (listed files actually unchanged)\n'
436 b'reusing manifest from p1 (listed files actually unchanged)\n'
437 )
437 )
438 mn = p1.manifestnode()
438 mn = p1.manifestnode()
439
439
440 return mn
440 return mn
441
441
442
442
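# A self-contained sketch of the reuse decision in _commit_manifest
# above, with plain dicts standing in for manifests: an empty diff over
# the claimed files means the parent manifest node can be reused
# verbatim instead of writing a new manifest revision.
m1 = {b'a.txt': (b'f1', b'')}
m2 = {b'a.txt': (b'f1', b'')}
files = []                               # changelog says nothing changed
md = {p: (m1.get(p), m2.get(p))          # manifestdict.diff() analogue
      for p in set(m1) | set(m2) if m1.get(p) != m2.get(p)}
reuse_parent_manifest = not files and not md
assert reuse_parent_manifest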
443 def _extra_with_copies(repo, extra, files):
443 def _extra_with_copies(repo, extra, files):
444 """encode copy information into a `extra` dictionnary"""
444 """encode copy information into a `extra` dictionnary"""
445 p1copies = files.copied_from_p1
445 p1copies = files.copied_from_p1
446 p2copies = files.copied_from_p2
446 p2copies = files.copied_from_p2
447 filesadded = files.added
447 filesadded = files.added
448 filesremoved = files.removed
448 filesremoved = files.removed
449 files = sorted(files.touched)
449 files = sorted(files.touched)
450 if not _write_copy_meta(repo)[1]:
450 if not _write_copy_meta(repo)[1]:
451 # If writing only to changeset extras, use None to indicate that
451 # If writing only to changeset extras, use None to indicate that
452 # no entry should be written. If writing to both, write an empty
452 # no entry should be written. If writing to both, write an empty
453 # entry to prevent the reader from falling back to reading
453 # entry to prevent the reader from falling back to reading
454 # filelogs.
454 # filelogs.
455 p1copies = p1copies or None
455 p1copies = p1copies or None
456 p2copies = p2copies or None
456 p2copies = p2copies or None
457 filesadded = filesadded or None
457 filesadded = filesadded or None
458 filesremoved = filesremoved or None
458 filesremoved = filesremoved or None
459
459
460 extrasentries = p1copies, p2copies, filesadded, filesremoved
460 extrasentries = p1copies, p2copies, filesadded, filesremoved
461 if extra is None and any(x is not None for x in extrasentries):
461 if extra is None and any(x is not None for x in extrasentries):
462 extra = {}
462 extra = {}
463 if p1copies is not None:
463 if p1copies is not None:
464 p1copies = metadata.encodecopies(files, p1copies)
464 p1copies = metadata.encodecopies(files, p1copies)
465 extra[b'p1copies'] = p1copies
465 extra[b'p1copies'] = p1copies
466 if p2copies is not None:
466 if p2copies is not None:
467 p2copies = metadata.encodecopies(files, p2copies)
467 p2copies = metadata.encodecopies(files, p2copies)
468 extra[b'p2copies'] = p2copies
468 extra[b'p2copies'] = p2copies
469 if filesadded is not None:
469 if filesadded is not None:
470 filesadded = metadata.encodefileindices(files, filesadded)
470 filesadded = metadata.encodefileindices(files, filesadded)
471 extra[b'filesadded'] = filesadded
471 extra[b'filesadded'] = filesadded
472 if filesremoved is not None:
472 if filesremoved is not None:
473 filesremoved = metadata.encodefileindices(files, filesremoved)
473 filesremoved = metadata.encodefileindices(files, filesremoved)
474 extra[b'filesremoved'] = filesremoved
474 extra[b'filesremoved'] = filesremoved
475 return extra
475 return extra
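# A hedged illustration of the extras produced above, for a hypothetical
# commit that copied "a" to "b" and added "c" (file names invented; the
# encoded forms assume metadata.encodecopies/encodefileindices index
# into the sorted list of touched files):
files = [b'b', b'c']          # sorted(files.touched)
extra = {
    b'p1copies': b'0\x00a',   # files[0] == b'b' was copied from b'a'
    b'filesadded': b'0\n1',   # both entries are new in this commit
}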
@@ -1,784 +1,786 b''
1 # exchangev2.py - repository exchange for wire protocol version 2
1 # exchangev2.py - repository exchange for wire protocol version 2
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import weakref
11 import weakref
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 nullid,
15 nullid,
16 short,
16 short,
17 )
17 )
18 from . import (
18 from . import (
19 bookmarks,
19 bookmarks,
20 error,
20 error,
21 mdiff,
21 mdiff,
22 narrowspec,
22 narrowspec,
23 phases,
23 phases,
24 pycompat,
24 pycompat,
25 setdiscovery,
25 setdiscovery,
26 )
26 )
27 from .interfaces import repository
27 from .interfaces import repository
28
28
29
29
30 def pull(pullop):
30 def pull(pullop):
31 """Pull using wire protocol version 2."""
31 """Pull using wire protocol version 2."""
32 repo = pullop.repo
32 repo = pullop.repo
33 remote = pullop.remote
33 remote = pullop.remote
34
34
35 usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
35 usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop)
36
36
37 # If this is a clone and it was requested to perform a "stream clone",
37 # If this is a clone and it was requested to perform a "stream clone",
38 # we obtain the raw files data from the remote then fall back to an
38 # we obtain the raw files data from the remote then fall back to an
39 # incremental pull. This is somewhat hacky and is not nearly robust enough
39 # incremental pull. This is somewhat hacky and is not nearly robust enough
40 # for long-term usage.
40 # for long-term usage.
41 if usingrawchangelogandmanifest:
41 if usingrawchangelogandmanifest:
42 with repo.transaction(b'clone'):
42 with repo.transaction(b'clone'):
43 _fetchrawstorefiles(repo, remote)
43 _fetchrawstorefiles(repo, remote)
44 repo.invalidate(clearfilecache=True)
44 repo.invalidate(clearfilecache=True)
45
45
46 tr = pullop.trmanager.transaction()
46 tr = pullop.trmanager.transaction()
47
47
48 # We don't use the repo's narrow matcher here because the patterns passed
48 # We don't use the repo's narrow matcher here because the patterns passed
49 # to exchange.pull() could be different.
49 # to exchange.pull() could be different.
50 narrowmatcher = narrowspec.match(
50 narrowmatcher = narrowspec.match(
51 repo.root,
51 repo.root,
52 # Empty maps to nevermatcher. So always
52 # Empty maps to nevermatcher. So always
53 # set includes if missing.
53 # set includes if missing.
54 pullop.includepats or {b'path:.'},
54 pullop.includepats or {b'path:.'},
55 pullop.excludepats,
55 pullop.excludepats,
56 )
56 )
57
57
58 if pullop.includepats or pullop.excludepats:
58 if pullop.includepats or pullop.excludepats:
59 pathfilter = {}
59 pathfilter = {}
60 if pullop.includepats:
60 if pullop.includepats:
61 pathfilter[b'include'] = sorted(pullop.includepats)
61 pathfilter[b'include'] = sorted(pullop.includepats)
62 if pullop.excludepats:
62 if pullop.excludepats:
63 pathfilter[b'exclude'] = sorted(pullop.excludepats)
63 pathfilter[b'exclude'] = sorted(pullop.excludepats)
64 else:
64 else:
65 pathfilter = None
65 pathfilter = None
66
66
67 # Figure out what needs to be fetched.
67 # Figure out what needs to be fetched.
68 common, fetch, remoteheads = _pullchangesetdiscovery(
68 common, fetch, remoteheads = _pullchangesetdiscovery(
69 repo, remote, pullop.heads, abortwhenunrelated=pullop.force
69 repo, remote, pullop.heads, abortwhenunrelated=pullop.force
70 )
70 )
71
71
72 # And fetch the data.
72 # And fetch the data.
73 pullheads = pullop.heads or remoteheads
73 pullheads = pullop.heads or remoteheads
74 csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
74 csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads)
75
75
76 # New revisions are written to the changelog. But all other updates
76 # New revisions are written to the changelog. But all other updates
77 # are deferred. Do those now.
77 # are deferred. Do those now.
78
78
79 # Ensure all new changesets are draft by default. If the repo is
79 # Ensure all new changesets are draft by default. If the repo is
80 # publishing, the phase will be adjusted by the loop below.
80 # publishing, the phase will be adjusted by the loop below.
81 if csetres[b'added']:
81 if csetres[b'added']:
82 phases.registernew(repo, tr, phases.draft, csetres[b'added'])
82 phases.registernew(
83 repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']]
84 )
83
85
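# The hunk above is the point of this series: phases.registernew() now
# takes revision numbers ("revision sets") instead of nodes, so the
# caller maps each added node to its rev first. An equivalent spelling
# via the changelog (a sketch, not part of the change):
#
#     new_revs = [repo.changelog.rev(n) for n in csetres[b'added']]
#     phases.registernew(repo, tr, phases.draft, new_revs)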
84 # And adjust the phase of all changesets accordingly.
86 # And adjust the phase of all changesets accordingly.
85 for phasenumber, phase in phases.phasenames.items():
87 for phasenumber, phase in phases.phasenames.items():
86 if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
88 if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
87 continue
89 continue
88
90
89 phases.advanceboundary(
91 phases.advanceboundary(
90 repo, tr, phasenumber, csetres[b'nodesbyphase'][phase],
92 repo, tr, phasenumber, csetres[b'nodesbyphase'][phase],
91 )
93 )
92
94
93 # Write bookmark updates.
95 # Write bookmark updates.
94 bookmarks.updatefromremote(
96 bookmarks.updatefromremote(
95 repo.ui,
97 repo.ui,
96 repo,
98 repo,
97 csetres[b'bookmarks'],
99 csetres[b'bookmarks'],
98 remote.url(),
100 remote.url(),
99 pullop.gettransaction,
101 pullop.gettransaction,
100 explicit=pullop.explicitbookmarks,
102 explicit=pullop.explicitbookmarks,
101 )
103 )
102
104
103 manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
105 manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes'])
104
106
105 # We don't properly support shallow changeset and manifest yet. So we apply
107 # We don't properly support shallow changeset and manifest yet. So we apply
106 # depth limiting locally.
108 # depth limiting locally.
107 if pullop.depth:
109 if pullop.depth:
108 relevantcsetnodes = set()
110 relevantcsetnodes = set()
109 clnode = repo.changelog.node
111 clnode = repo.changelog.node
110
112
111 for rev in repo.revs(
113 for rev in repo.revs(
112 b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
114 b'ancestors(%ln, %s)', pullheads, pullop.depth - 1
113 ):
115 ):
114 relevantcsetnodes.add(clnode(rev))
116 relevantcsetnodes.add(clnode(rev))
115
117
116 csetrelevantfilter = lambda n: n in relevantcsetnodes
118 csetrelevantfilter = lambda n: n in relevantcsetnodes
117
119
118 else:
120 else:
119 csetrelevantfilter = lambda n: True
121 csetrelevantfilter = lambda n: True
120
122
121 # If obtaining the raw store files, we need to scan the full repo to
123 # If obtaining the raw store files, we need to scan the full repo to
122 # derive all the changesets, manifests, and linkrevs.
124 # derive all the changesets, manifests, and linkrevs.
123 if usingrawchangelogandmanifest:
125 if usingrawchangelogandmanifest:
124 csetsforfiles = []
126 csetsforfiles = []
125 mnodesforfiles = []
127 mnodesforfiles = []
126 manifestlinkrevs = {}
128 manifestlinkrevs = {}
127
129
128 for rev in repo:
130 for rev in repo:
129 ctx = repo[rev]
131 ctx = repo[rev]
130 node = ctx.node()
132 node = ctx.node()
131
133
132 if not csetrelevantfilter(node):
134 if not csetrelevantfilter(node):
133 continue
135 continue
134
136
135 mnode = ctx.manifestnode()
137 mnode = ctx.manifestnode()
136
138
137 csetsforfiles.append(node)
139 csetsforfiles.append(node)
138 mnodesforfiles.append(mnode)
140 mnodesforfiles.append(mnode)
139 manifestlinkrevs[mnode] = rev
141 manifestlinkrevs[mnode] = rev
140
142
141 else:
143 else:
142 csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
144 csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)]
143 mnodesforfiles = manres[b'added']
145 mnodesforfiles = manres[b'added']
144 manifestlinkrevs = manres[b'linkrevs']
146 manifestlinkrevs = manres[b'linkrevs']
145
147
146 # Find all file nodes referenced by added manifests and fetch those
148 # Find all file nodes referenced by added manifests and fetch those
147 # revisions.
149 # revisions.
148 fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
150 fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles)
149 _fetchfilesfromcsets(
151 _fetchfilesfromcsets(
150 repo,
152 repo,
151 tr,
153 tr,
152 remote,
154 remote,
153 pathfilter,
155 pathfilter,
154 fnodes,
156 fnodes,
155 csetsforfiles,
157 csetsforfiles,
156 manifestlinkrevs,
158 manifestlinkrevs,
157 shallow=bool(pullop.depth),
159 shallow=bool(pullop.depth),
158 )
160 )
159
161
160
162
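# A small self-contained sketch of the wire-level path filter assembled
# near the top of pull() (patterns invented for illustration): include
# and exclude lists are sorted and only sent when the pull actually
# narrows the file set; otherwise pathfilter stays None.
pathfilter = {
    b'include': sorted({b'path:src', b'path:docs'}),
    b'exclude': sorted({b'path:src/vendor'}),
}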
161 def _checkuserawstorefiledata(pullop):
163 def _checkuserawstorefiledata(pullop):
162 """Check whether we should use rawstorefiledata command to retrieve data."""
164 """Check whether we should use rawstorefiledata command to retrieve data."""
163
165
164 repo = pullop.repo
166 repo = pullop.repo
165 remote = pullop.remote
167 remote = pullop.remote
166
168
167 # Command to obtain raw store data isn't available.
169 # Command to obtain raw store data isn't available.
168 if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
170 if b'rawstorefiledata' not in remote.apidescriptor[b'commands']:
169 return False
171 return False
170
172
171 # Only honor if user requested stream clone operation.
173 # Only honor if user requested stream clone operation.
172 if not pullop.streamclonerequested:
174 if not pullop.streamclonerequested:
173 return False
175 return False
174
176
175 # Only works on empty repos.
177 # Only works on empty repos.
176 if len(repo):
178 if len(repo):
177 return False
179 return False
178
180
179 # TODO This is super hacky. There needs to be a storage API for this. We
181 # TODO This is super hacky. There needs to be a storage API for this. We
180 # also need to check for compatibility with the remote.
182 # also need to check for compatibility with the remote.
181 if b'revlogv1' not in repo.requirements:
183 if b'revlogv1' not in repo.requirements:
182 return False
184 return False
183
185
184 return True
186 return True
185
187
186
188
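# The guard clauses above reduce to one conjunction; a sketch using the
# same names as the function (annotation only, not runnable on its own):
#
#     use_rawstorefiledata = (
#         b'rawstorefiledata' in remote.apidescriptor[b'commands']
#         and pullop.streamclonerequested
#         and len(repo) == 0                    # only empty local repos
#         and b'revlogv1' in repo.requirements  # storage-format check
#     )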
187 def _fetchrawstorefiles(repo, remote):
189 def _fetchrawstorefiles(repo, remote):
188 with remote.commandexecutor() as e:
190 with remote.commandexecutor() as e:
189 objs = e.callcommand(
191 objs = e.callcommand(
190 b'rawstorefiledata', {b'files': [b'changelog', b'manifestlog'],}
192 b'rawstorefiledata', {b'files': [b'changelog', b'manifestlog'],}
191 ).result()
193 ).result()
192
194
193 # First object is a summary of files data that follows.
195 # First object is a summary of files data that follows.
194 overall = next(objs)
196 overall = next(objs)
195
197
196 progress = repo.ui.makeprogress(
198 progress = repo.ui.makeprogress(
197 _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
199 _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes')
198 )
200 )
199 with progress:
201 with progress:
200 progress.update(0)
202 progress.update(0)
201
203
202 # Next are pairs of file metadata, data.
204 # Next are pairs of file metadata, data.
203 while True:
205 while True:
204 try:
206 try:
205 filemeta = next(objs)
207 filemeta = next(objs)
206 except StopIteration:
208 except StopIteration:
207 break
209 break
208
210
209 for k in (b'location', b'path', b'size'):
211 for k in (b'location', b'path', b'size'):
210 if k not in filemeta:
212 if k not in filemeta:
211 raise error.Abort(
213 raise error.Abort(
212 _(b'remote file data missing key: %s') % k
214 _(b'remote file data missing key: %s') % k
213 )
215 )
214
216
215 if filemeta[b'location'] == b'store':
217 if filemeta[b'location'] == b'store':
216 vfs = repo.svfs
218 vfs = repo.svfs
217 else:
219 else:
218 raise error.Abort(
220 raise error.Abort(
219 _(b'invalid location for raw file data: %s')
221 _(b'invalid location for raw file data: %s')
220 % filemeta[b'location']
222 % filemeta[b'location']
221 )
223 )
222
224
223 bytesremaining = filemeta[b'size']
225 bytesremaining = filemeta[b'size']
224
226
225 with vfs.open(filemeta[b'path'], b'wb') as fh:
227 with vfs.open(filemeta[b'path'], b'wb') as fh:
226 while True:
228 while True:
227 try:
229 try:
228 chunk = next(objs)
230 chunk = next(objs)
229 except StopIteration:
231 except StopIteration:
230 break
232 break
231
233
232 bytesremaining -= len(chunk)
234 bytesremaining -= len(chunk)
233
235
234 if bytesremaining < 0:
236 if bytesremaining < 0:
235 raise error.Abort(
237 raise error.Abort(
236 _(
238 _(
237 b'received invalid number of bytes for file '
239 b'received invalid number of bytes for file '
238 b'data; expected %d, got extra'
240 b'data; expected %d, got extra'
239 )
241 )
240 % filemeta[b'size']
242 % filemeta[b'size']
241 )
243 )
242
244
243 progress.increment(step=len(chunk))
245 progress.increment(step=len(chunk))
244 fh.write(chunk)
246 fh.write(chunk)
245
247
246 try:
248 try:
247 if chunk.islast:
249 if chunk.islast:
248 break
250 break
249 except AttributeError:
251 except AttributeError:
250 raise error.Abort(
252 raise error.Abort(
251 _(
253 _(
252 b'did not receive indefinite length bytestring '
254 b'did not receive indefinite length bytestring '
253 b'for file data'
255 b'for file data'
254 )
256 )
255 )
257 )
256
258
257 if bytesremaining:
259 if bytesremaining:
258 raise error.Abort(
260 raise error.Abort(
259 _(
261 _(
260 b'received invalid number of bytes for '
262 b'received invalid number of bytes for '
261 b'file data; expected %d, got %d'
263 b'file data; expected %d, got %d'
262 )
264 )
263 % (
265 % (
264 filemeta[b'size'],
266 filemeta[b'size'],
265 filemeta[b'size'] - bytesremaining,
267 filemeta[b'size'] - bytesremaining,
266 )
268 )
267 )
269 )
268
270
269
271
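# A self-contained sketch of the byte accounting above, with fake chunks
# standing in for the wire stream (no Mercurial objects involved): the
# advertised size must be consumed exactly, never over- or under-shot.
chunks = [b'abc', b'defgh', b'ij']
advertised = sum(len(c) for c in chunks)   # filemeta[b'size'] analogue
remaining = advertised
for chunk in chunks:
    remaining -= len(chunk)
    assert remaining >= 0, 'got more bytes than advertised'
assert remaining == 0, 'got fewer bytes than advertised'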
270 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
272 def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True):
271 """Determine which changesets need to be pulled."""
273 """Determine which changesets need to be pulled."""
272
274
273 if heads:
275 if heads:
274 knownnode = repo.changelog.hasnode
276 knownnode = repo.changelog.hasnode
275 if all(knownnode(head) for head in heads):
277 if all(knownnode(head) for head in heads):
276 return heads, False, heads
278 return heads, False, heads
277
279
278 # TODO wire protocol version 2 is capable of more efficient discovery
280 # TODO wire protocol version 2 is capable of more efficient discovery
279 # than setdiscovery. Consider implementing something better.
281 # than setdiscovery. Consider implementing something better.
280 common, fetch, remoteheads = setdiscovery.findcommonheads(
282 common, fetch, remoteheads = setdiscovery.findcommonheads(
281 repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
283 repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated
282 )
284 )
283
285
284 common = set(common)
286 common = set(common)
285 remoteheads = set(remoteheads)
287 remoteheads = set(remoteheads)
286
288
287 # If a remote head is filtered locally, put it back in the common set.
289 # If a remote head is filtered locally, put it back in the common set.
288 # See the comment in exchange._pulldiscoverychangegroup() for more.
290 # See the comment in exchange._pulldiscoverychangegroup() for more.
289
291
290 if fetch and remoteheads:
292 if fetch and remoteheads:
291 has_node = repo.unfiltered().changelog.index.has_node
293 has_node = repo.unfiltered().changelog.index.has_node
292
294
293 common |= {head for head in remoteheads if has_node(head)}
295 common |= {head for head in remoteheads if has_node(head)}
294
296
295 if set(remoteheads).issubset(common):
297 if set(remoteheads).issubset(common):
296 fetch = []
298 fetch = []
297
299
298 common.discard(nullid)
300 common.discard(nullid)
299
301
300 return common, fetch, remoteheads
302 return common, fetch, remoteheads
301
303
302
304
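# A self-contained sketch of the filtered-head adjustment above, with
# short fake nodes: a remote head already present locally (even if
# filtered) is folded into the common set, and once every remote head
# is common there is nothing left to fetch.
local_nodes = {b'n1', b'n2'}
remoteheads = {b'n2'}
common, fetch = {b'n1'}, [b'n2']
common |= {h for h in remoteheads if h in local_nodes}
if remoteheads.issubset(common):
    fetch = []
assert fetch == []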
303 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
305 def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads):
304 # TODO consider adding a step here where we obtain the DAG shape first
306 # TODO consider adding a step here where we obtain the DAG shape first
305 # (or ask the server to slice changesets into chunks for us) so that
307 # (or ask the server to slice changesets into chunks for us) so that
306 # we can perform multiple fetches in batches. This will facilitate
308 # we can perform multiple fetches in batches. This will facilitate
307 # resuming interrupted clones, higher server-side cache hit rates due
309 # resuming interrupted clones, higher server-side cache hit rates due
308 # to smaller segments, etc.
310 # to smaller segments, etc.
309 with remote.commandexecutor() as e:
311 with remote.commandexecutor() as e:
310 objs = e.callcommand(
312 objs = e.callcommand(
311 b'changesetdata',
313 b'changesetdata',
312 {
314 {
313 b'revisions': [
315 b'revisions': [
314 {
316 {
315 b'type': b'changesetdagrange',
317 b'type': b'changesetdagrange',
316 b'roots': sorted(common),
318 b'roots': sorted(common),
317 b'heads': sorted(remoteheads),
319 b'heads': sorted(remoteheads),
318 }
320 }
319 ],
321 ],
320 b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
322 b'fields': {b'bookmarks', b'parents', b'phase', b'revision'},
321 },
323 },
322 ).result()
324 ).result()
323
325
324 # The context manager waits on all response data when exiting. So
326 # The context manager waits on all response data when exiting. So
325 # we need to remain in the context manager in order to stream data.
327 # we need to remain in the context manager in order to stream data.
326 return _processchangesetdata(repo, tr, objs)
328 return _processchangesetdata(repo, tr, objs)
327
329
328
330
329 def _processchangesetdata(repo, tr, objs):
331 def _processchangesetdata(repo, tr, objs):
330 repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))
332 repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs))
331
333
332 urepo = repo.unfiltered()
334 urepo = repo.unfiltered()
333 cl = urepo.changelog
335 cl = urepo.changelog
334
336
335 cl.delayupdate(tr)
337 cl.delayupdate(tr)
336
338
337 # The first emitted object is a header describing the data that
339 # The first emitted object is a header describing the data that
338 # follows.
340 # follows.
339 meta = next(objs)
341 meta = next(objs)
340
342
341 progress = repo.ui.makeprogress(
343 progress = repo.ui.makeprogress(
342 _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
344 _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems')
343 )
345 )
344
346
345 manifestnodes = {}
347 manifestnodes = {}
346 added = []
348 added = []
347
349
348 def linkrev(node):
350 def linkrev(node):
349 repo.ui.debug(b'add changeset %s\n' % short(node))
351 repo.ui.debug(b'add changeset %s\n' % short(node))
350 # Linkrev for changelog is always self.
352 # Linkrev for changelog is always self.
351 return len(cl)
353 return len(cl)
352
354
353 def ondupchangeset(cl, node):
355 def ondupchangeset(cl, node):
354 added.append(node)
356 added.append(node)
355
357
356 def onchangeset(cl, node):
358 def onchangeset(cl, node):
357 progress.increment()
359 progress.increment()
358
360
359 revision = cl.changelogrevision(node)
361 revision = cl.changelogrevision(node)
360 added.append(node)
362 added.append(node)
361
363
362 # We need to preserve the mapping of changelog revision to node
364 # We need to preserve the mapping of changelog revision to node
363 # so we can set the linkrev accordingly when manifests are added.
365 # so we can set the linkrev accordingly when manifests are added.
364 manifestnodes[cl.rev(node)] = revision.manifest
366 manifestnodes[cl.rev(node)] = revision.manifest
365
367
366 nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
368 nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
367 remotebookmarks = {}
369 remotebookmarks = {}
368
370
369 # addgroup() expects a 7-tuple describing revisions. This normalizes
371 # addgroup() expects a 7-tuple describing revisions. This normalizes
370 # the wire data to that format.
372 # the wire data to that format.
371 #
373 #
372 # This loop also aggregates non-revision metadata, such as phase
374 # This loop also aggregates non-revision metadata, such as phase
373 # data.
375 # data.
374 def iterrevisions():
376 def iterrevisions():
375 for cset in objs:
377 for cset in objs:
376 node = cset[b'node']
378 node = cset[b'node']
377
379
378 if b'phase' in cset:
380 if b'phase' in cset:
379 nodesbyphase[cset[b'phase']].add(node)
381 nodesbyphase[cset[b'phase']].add(node)
380
382
381 for mark in cset.get(b'bookmarks', []):
383 for mark in cset.get(b'bookmarks', []):
382 remotebookmarks[mark] = node
384 remotebookmarks[mark] = node
383
385
384 # TODO add mechanism for extensions to examine records so they
386 # TODO add mechanism for extensions to examine records so they
385 # can siphon off custom data fields.
387 # can siphon off custom data fields.
386
388
387 extrafields = {}
389 extrafields = {}
388
390
389 for field, size in cset.get(b'fieldsfollowing', []):
391 for field, size in cset.get(b'fieldsfollowing', []):
390 extrafields[field] = next(objs)
392 extrafields[field] = next(objs)
391
393
392 # Some entries might be metadata-only updates.
394 # Some entries might be metadata-only updates.
393 if b'revision' not in extrafields:
395 if b'revision' not in extrafields:
394 continue
396 continue
395
397
396 data = extrafields[b'revision']
398 data = extrafields[b'revision']
397
399
398 yield (
400 yield (
399 node,
401 node,
400 cset[b'parents'][0],
402 cset[b'parents'][0],
401 cset[b'parents'][1],
403 cset[b'parents'][1],
402 # Linknode is always itself for changesets.
404 # Linknode is always itself for changesets.
403 cset[b'node'],
405 cset[b'node'],
404 # We always send full revisions. So delta base is not set.
406 # We always send full revisions. So delta base is not set.
405 nullid,
407 nullid,
406 mdiff.trivialdiffheader(len(data)) + data,
408 mdiff.trivialdiffheader(len(data)) + data,
407 # Flags not yet supported.
409 # Flags not yet supported.
408 0,
410 0,
409 )
411 )
410
412
411 cl.addgroup(
413 cl.addgroup(
412 iterrevisions(),
414 iterrevisions(),
413 linkrev,
415 linkrev,
414 weakref.proxy(tr),
416 weakref.proxy(tr),
415 addrevisioncb=onchangeset,
417 addrevisioncb=onchangeset,
416 duplicaterevisioncb=ondupchangeset,
418 duplicaterevisioncb=ondupchangeset,
417 )
419 )
418
420
419 progress.complete()
421 progress.complete()
420
422
421 return {
423 return {
422 b'added': added,
424 b'added': added,
423 b'nodesbyphase': nodesbyphase,
425 b'nodesbyphase': nodesbyphase,
424 b'bookmarks': remotebookmarks,
426 b'bookmarks': remotebookmarks,
425 b'manifestnodes': manifestnodes,
427 b'manifestnodes': manifestnodes,
426 }
428 }
427
429
428
430
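# A self-contained sketch of the 7-tuple yielded to addgroup() above:
# (node, p1, p2, linknode, deltabase, delta, flags). Nodes are invented
# and trivialdiffheader is a local copy of the mdiff helper, which emits
# a delta replacing the empty range [0, 0) with the full text.
import struct

def trivialdiffheader(length):
    return struct.pack('>lll', 0, 0, length)

nullid = b'\x00' * 20
node = b'\x11' * 20
data = b'fake changelog entry'
entry = (
    node,      # revision being added
    nullid,    # p1
    nullid,    # p2
    node,      # linknode: a changeset always links to itself
    nullid,    # full revisions are sent, so no delta base
    trivialdiffheader(len(data)) + data,
    0,         # flags not yet supported
)
assert len(entry) == 7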
429 def _fetchmanifests(repo, tr, remote, manifestnodes):
431 def _fetchmanifests(repo, tr, remote, manifestnodes):
430 rootmanifest = repo.manifestlog.getstorage(b'')
432 rootmanifest = repo.manifestlog.getstorage(b'')
431
433
432 # Some manifests can be shared between changesets. Filter out revisions
434 # Some manifests can be shared between changesets. Filter out revisions
433 # we already know about.
435 # we already know about.
434 fetchnodes = []
436 fetchnodes = []
435 linkrevs = {}
437 linkrevs = {}
436 seen = set()
438 seen = set()
437
439
438 for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
440 for clrev, node in sorted(pycompat.iteritems(manifestnodes)):
439 if node in seen:
441 if node in seen:
440 continue
442 continue
441
443
442 try:
444 try:
443 rootmanifest.rev(node)
445 rootmanifest.rev(node)
444 except error.LookupError:
446 except error.LookupError:
445 fetchnodes.append(node)
447 fetchnodes.append(node)
446 linkrevs[node] = clrev
448 linkrevs[node] = clrev
447
449
448 seen.add(node)
450 seen.add(node)
449
451
450 # TODO handle tree manifests
452 # TODO handle tree manifests
451
453
452 # addgroup() expects a 7-tuple describing revisions. This normalizes
454 # addgroup() expects a 7-tuple describing revisions. This normalizes
453 # the wire data to that format.
455 # the wire data to that format.
454 def iterrevisions(objs, progress):
456 def iterrevisions(objs, progress):
455 for manifest in objs:
457 for manifest in objs:
456 node = manifest[b'node']
458 node = manifest[b'node']
457
459
458 extrafields = {}
460 extrafields = {}
459
461
460 for field, size in manifest.get(b'fieldsfollowing', []):
462 for field, size in manifest.get(b'fieldsfollowing', []):
461 extrafields[field] = next(objs)
463 extrafields[field] = next(objs)
462
464
463 if b'delta' in extrafields:
465 if b'delta' in extrafields:
464 basenode = manifest[b'deltabasenode']
466 basenode = manifest[b'deltabasenode']
465 delta = extrafields[b'delta']
467 delta = extrafields[b'delta']
466 elif b'revision' in extrafields:
468 elif b'revision' in extrafields:
467 basenode = nullid
469 basenode = nullid
468 revision = extrafields[b'revision']
470 revision = extrafields[b'revision']
469 delta = mdiff.trivialdiffheader(len(revision)) + revision
471 delta = mdiff.trivialdiffheader(len(revision)) + revision
470 else:
472 else:
471 continue
473 continue
472
474
473 yield (
475 yield (
474 node,
476 node,
475 manifest[b'parents'][0],
477 manifest[b'parents'][0],
476 manifest[b'parents'][1],
478 manifest[b'parents'][1],
477 # The value passed in is passed to the lookup function passed
479 # The value passed in is passed to the lookup function passed
478 # to addgroup(). We already have a map of manifest node to
480 # to addgroup(). We already have a map of manifest node to
479 # changelog revision number. So we just pass in the
481 # changelog revision number. So we just pass in the
480 # manifest node here and use linkrevs.__getitem__ as the
482 # manifest node here and use linkrevs.__getitem__ as the
481 # resolution function.
483 # resolution function.
482 node,
484 node,
483 basenode,
485 basenode,
484 delta,
486 delta,
485 # Flags not yet supported.
487 # Flags not yet supported.
486 0,
488 0,
487 )
489 )
488
490
489 progress.increment()
491 progress.increment()
490
492
491 progress = repo.ui.makeprogress(
493 progress = repo.ui.makeprogress(
492 _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
494 _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes)
493 )
495 )
494
496
495 commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
497 commandmeta = remote.apidescriptor[b'commands'][b'manifestdata']
496 batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
498 batchsize = commandmeta.get(b'recommendedbatchsize', 10000)
497 # TODO make size configurable on client?
499 # TODO make size configurable on client?
498
500
499 # We send commands 1 at a time to the remote. This is not the most
501 # We send commands 1 at a time to the remote. This is not the most
500 # efficient because we incur a round trip at the end of each batch.
502 # efficient because we incur a round trip at the end of each batch.
501 # However, the existing frame-based reactor keeps consuming server
503 # However, the existing frame-based reactor keeps consuming server
502 # data in the background. And this results in response data buffering
504 # data in the background. And this results in response data buffering
503 # in memory. This can consume gigabytes of memory.
505 # in memory. This can consume gigabytes of memory.
504 # TODO send multiple commands in a request once background buffering
506 # TODO send multiple commands in a request once background buffering
505 # issues are resolved.
507 # issues are resolved.
506
508
507 added = []
509 added = []
508
510
509 for i in pycompat.xrange(0, len(fetchnodes), batchsize):
511 for i in pycompat.xrange(0, len(fetchnodes), batchsize):
510 batch = [node for node in fetchnodes[i : i + batchsize]]
512 batch = [node for node in fetchnodes[i : i + batchsize]]
511 if not batch:
513 if not batch:
512 continue
514 continue
513
515
514 with remote.commandexecutor() as e:
516 with remote.commandexecutor() as e:
515 objs = e.callcommand(
517 objs = e.callcommand(
516 b'manifestdata',
518 b'manifestdata',
517 {
519 {
518 b'tree': b'',
520 b'tree': b'',
519 b'nodes': batch,
521 b'nodes': batch,
520 b'fields': {b'parents', b'revision'},
522 b'fields': {b'parents', b'revision'},
521 b'haveparents': True,
523 b'haveparents': True,
522 },
524 },
523 ).result()
525 ).result()
524
526
525 # Chomp off header object.
527 # Chomp off header object.
526 next(objs)
528 next(objs)
527
529
528 def onchangeset(cl, node):
530 def onchangeset(cl, node):
529 added.append(node)
531 added.append(node)
530
532
531 rootmanifest.addgroup(
533 rootmanifest.addgroup(
532 iterrevisions(objs, progress),
534 iterrevisions(objs, progress),
533 linkrevs.__getitem__,
535 linkrevs.__getitem__,
534 weakref.proxy(tr),
536 weakref.proxy(tr),
535 addrevisioncb=onchangeset,
537 addrevisioncb=onchangeset,
536 duplicaterevisioncb=onchangeset,
538 duplicaterevisioncb=onchangeset,
537 )
539 )
538
540
539 progress.complete()
541 progress.complete()
540
542
541 return {
543 return {
542 b'added': added,
544 b'added': added,
543 b'linkrevs': linkrevs,
545 b'linkrevs': linkrevs,
544 }
546 }
545
547
546
548
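# A self-contained sketch of the batching loop above: fixed-size slices
# of the node list, one remote command issued per slice (integers stand
# in for manifest nodes).
fetchnodes = list(range(25))
batchsize = 10
batches = [
    fetchnodes[i : i + batchsize]
    for i in range(0, len(fetchnodes), batchsize)
]
assert [len(b) for b in batches] == [10, 10, 5]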
547 def _derivefilesfrommanifests(repo, matcher, manifestnodes):
549 def _derivefilesfrommanifests(repo, matcher, manifestnodes):
548 """Determine what file nodes are relevant given a set of manifest nodes.
550 """Determine what file nodes are relevant given a set of manifest nodes.
549
551
550 Returns a dict mapping file paths to dicts of file node to first manifest
552 Returns a dict mapping file paths to dicts of file node to first manifest
551 node.
553 node.
552 """
554 """
553 ml = repo.manifestlog
555 ml = repo.manifestlog
554 fnodes = collections.defaultdict(dict)
556 fnodes = collections.defaultdict(dict)
555
557
556 progress = repo.ui.makeprogress(
558 progress = repo.ui.makeprogress(
557 _(b'scanning manifests'), total=len(manifestnodes)
559 _(b'scanning manifests'), total=len(manifestnodes)
558 )
560 )
559
561
560 with progress:
562 with progress:
561 for manifestnode in manifestnodes:
563 for manifestnode in manifestnodes:
562 m = ml.get(b'', manifestnode)
564 m = ml.get(b'', manifestnode)
563
565
564 # TODO this will pull in unwanted nodes because it takes the storage
566 # TODO this will pull in unwanted nodes because it takes the storage
565 # delta into consideration. What we really want is something that
567 # delta into consideration. What we really want is something that
566 # takes the delta between the manifest's parents. And ideally we
568 # takes the delta between the manifest's parents. And ideally we
567 # would ignore file nodes that are known locally. For now, ignore
569 # would ignore file nodes that are known locally. For now, ignore
568 # both these limitations. This will result in incremental fetches
570 # both these limitations. This will result in incremental fetches
569 # requesting data we already have. So this is far from ideal.
571 # requesting data we already have. So this is far from ideal.
570 md = m.readfast()
572 md = m.readfast()
571
573
572 for path, fnode in md.items():
574 for path, fnode in md.items():
573 if matcher(path):
575 if matcher(path):
574 fnodes[path].setdefault(fnode, manifestnode)
576 fnodes[path].setdefault(fnode, manifestnode)
575
577
576 progress.increment()
578 progress.increment()
577
579
578 return fnodes
580 return fnodes
579
581
580
582
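# A self-contained sketch of the "first manifest wins" bookkeeping
# above: setdefault() keeps the first manifest node seen for each file
# node, so later manifests never overwrite the initial attribution.
import collections

fnodes = collections.defaultdict(dict)
manifests = [
    (b'm1', {b'a.txt': b'f1'}),
    (b'm2', {b'a.txt': b'f1', b'b.txt': b'f2'}),
]
for manifestnode, md in manifests:
    for path, fnode in md.items():
        fnodes[path].setdefault(fnode, manifestnode)
assert fnodes[b'a.txt'] == {b'f1': b'm1'}   # m2 did not overwrite m1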
581 def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
583 def _fetchfiles(repo, tr, remote, fnodes, linkrevs):
582 """Fetch file data from explicit file revisions."""
584 """Fetch file data from explicit file revisions."""
583
585
584 def iterrevisions(objs, progress):
586 def iterrevisions(objs, progress):
585 for filerevision in objs:
587 for filerevision in objs:
586 node = filerevision[b'node']
588 node = filerevision[b'node']
587
589
588 extrafields = {}
590 extrafields = {}
589
591
590 for field, size in filerevision.get(b'fieldsfollowing', []):
592 for field, size in filerevision.get(b'fieldsfollowing', []):
591 extrafields[field] = next(objs)
593 extrafields[field] = next(objs)
592
594
593 if b'delta' in extrafields:
595 if b'delta' in extrafields:
594 basenode = filerevision[b'deltabasenode']
596 basenode = filerevision[b'deltabasenode']
595 delta = extrafields[b'delta']
597 delta = extrafields[b'delta']
596 elif b'revision' in extrafields:
598 elif b'revision' in extrafields:
597 basenode = nullid
599 basenode = nullid
598 revision = extrafields[b'revision']
600 revision = extrafields[b'revision']
599 delta = mdiff.trivialdiffheader(len(revision)) + revision
601 delta = mdiff.trivialdiffheader(len(revision)) + revision
600 else:
602 else:
601 continue
603 continue
602
604
603 yield (
605 yield (
604 node,
606 node,
605 filerevision[b'parents'][0],
607 filerevision[b'parents'][0],
606 filerevision[b'parents'][1],
608 filerevision[b'parents'][1],
607 node,
609 node,
608 basenode,
610 basenode,
609 delta,
611 delta,
610 # Flags not yet supported.
612 # Flags not yet supported.
611 0,
613 0,
612 )
614 )
613
615
614 progress.increment()
616 progress.increment()
615
617
616 progress = repo.ui.makeprogress(
618 progress = repo.ui.makeprogress(
617 _(b'files'),
619 _(b'files'),
618 unit=_(b'chunks'),
620 unit=_(b'chunks'),
619 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
621 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
620 )
622 )
621
623
622 # TODO make batch size configurable
624 # TODO make batch size configurable
623 batchsize = 10000
625 batchsize = 10000
624 fnodeslist = [x for x in sorted(fnodes.items())]
626 fnodeslist = [x for x in sorted(fnodes.items())]
625
627
626 for i in pycompat.xrange(0, len(fnodeslist), batchsize):
628 for i in pycompat.xrange(0, len(fnodeslist), batchsize):
627 batch = [x for x in fnodeslist[i : i + batchsize]]
629 batch = [x for x in fnodeslist[i : i + batchsize]]
628 if not batch:
630 if not batch:
629 continue
631 continue
630
632
631 with remote.commandexecutor() as e:
633 with remote.commandexecutor() as e:
632 fs = []
634 fs = []
633 locallinkrevs = {}
635 locallinkrevs = {}
634
636
635 for path, nodes in batch:
637 for path, nodes in batch:
636 fs.append(
638 fs.append(
637 (
639 (
638 path,
640 path,
639 e.callcommand(
641 e.callcommand(
640 b'filedata',
642 b'filedata',
641 {
643 {
642 b'path': path,
644 b'path': path,
643 b'nodes': sorted(nodes),
645 b'nodes': sorted(nodes),
644 b'fields': {b'parents', b'revision'},
646 b'fields': {b'parents', b'revision'},
645 b'haveparents': True,
647 b'haveparents': True,
646 },
648 },
647 ),
649 ),
648 )
650 )
649 )
651 )
650
652
651 locallinkrevs[path] = {
653 locallinkrevs[path] = {
652 node: linkrevs[manifestnode]
654 node: linkrevs[manifestnode]
653 for node, manifestnode in pycompat.iteritems(nodes)
655 for node, manifestnode in pycompat.iteritems(nodes)
654 }
656 }
655
657
656 for path, f in fs:
658 for path, f in fs:
657 objs = f.result()
659 objs = f.result()
658
660
659 # Chomp off header objects.
661 # Chomp off header objects.
660 next(objs)
662 next(objs)
661
663
662 store = repo.file(path)
664 store = repo.file(path)
663 store.addgroup(
665 store.addgroup(
664 iterrevisions(objs, progress),
666 iterrevisions(objs, progress),
665 locallinkrevs[path].__getitem__,
667 locallinkrevs[path].__getitem__,
666 weakref.proxy(tr),
668 weakref.proxy(tr),
667 )
669 )
668
670
669
671
670 def _fetchfilesfromcsets(
672 def _fetchfilesfromcsets(
671 repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
673 repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False
672 ):
674 ):
673 """Fetch file data from explicit changeset revisions."""
675 """Fetch file data from explicit changeset revisions."""
674
676
675 def iterrevisions(objs, remaining, progress):
677 def iterrevisions(objs, remaining, progress):
676 while remaining:
678 while remaining:
677 filerevision = next(objs)
679 filerevision = next(objs)
678
680
679 node = filerevision[b'node']
681 node = filerevision[b'node']
680
682
681 extrafields = {}
683 extrafields = {}
682
684
683 for field, size in filerevision.get(b'fieldsfollowing', []):
685 for field, size in filerevision.get(b'fieldsfollowing', []):
684 extrafields[field] = next(objs)
686 extrafields[field] = next(objs)
685
687
686 if b'delta' in extrafields:
688 if b'delta' in extrafields:
687 basenode = filerevision[b'deltabasenode']
689 basenode = filerevision[b'deltabasenode']
688 delta = extrafields[b'delta']
690 delta = extrafields[b'delta']
689 elif b'revision' in extrafields:
691 elif b'revision' in extrafields:
690 basenode = nullid
692 basenode = nullid
691 revision = extrafields[b'revision']
693 revision = extrafields[b'revision']
692 delta = mdiff.trivialdiffheader(len(revision)) + revision
694 delta = mdiff.trivialdiffheader(len(revision)) + revision
693 else:
695 else:
694 continue
696 continue
695
697
696 if b'linknode' in filerevision:
698 if b'linknode' in filerevision:
697 linknode = filerevision[b'linknode']
699 linknode = filerevision[b'linknode']
698 else:
700 else:
699 linknode = node
701 linknode = node
700
702
701 yield (
703 yield (
702 node,
704 node,
703 filerevision[b'parents'][0],
705 filerevision[b'parents'][0],
704 filerevision[b'parents'][1],
706 filerevision[b'parents'][1],
705 linknode,
707 linknode,
706 basenode,
708 basenode,
707 delta,
709 delta,
708 # Flags not yet supported.
710 # Flags not yet supported.
709 0,
711 0,
710 )
712 )
711
713
712 progress.increment()
714 progress.increment()
713 remaining -= 1
715 remaining -= 1
714
716
715 progress = repo.ui.makeprogress(
717 progress = repo.ui.makeprogress(
716 _(b'files'),
718 _(b'files'),
717 unit=_(b'chunks'),
719 unit=_(b'chunks'),
718 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
720 total=sum(len(v) for v in pycompat.itervalues(fnodes)),
719 )
721 )
720
722
721 commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
723 commandmeta = remote.apidescriptor[b'commands'][b'filesdata']
722 batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
724 batchsize = commandmeta.get(b'recommendedbatchsize', 50000)
723
725
724 shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
726 shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features
725 fields = {b'parents', b'revision'}
727 fields = {b'parents', b'revision'}
726 clrev = repo.changelog.rev
728 clrev = repo.changelog.rev
727
729
728 # There are no guarantees that we'll have ancestor revisions if
730 # There are no guarantees that we'll have ancestor revisions if
729 # a) this repo has shallow file storage b) shallow data fetching is enabled.
731 # a) this repo has shallow file storage b) shallow data fetching is enabled.
730 # Force remote to not delta against possibly unknown revisions when these
732 # Force remote to not delta against possibly unknown revisions when these
731 # conditions hold.
733 # conditions hold.
732 haveparents = not (shallowfiles or shallow)
734 haveparents = not (shallowfiles or shallow)
733
735
734 # Similarly, we may not have calculated linkrevs for all incoming file
736 # Similarly, we may not have calculated linkrevs for all incoming file
735 # revisions. Ask the remote to do work for us in this case.
737 # revisions. Ask the remote to do work for us in this case.
736 if not haveparents:
738 if not haveparents:
737 fields.add(b'linknode')
739 fields.add(b'linknode')
738
740
739 for i in pycompat.xrange(0, len(csets), batchsize):
741 for i in pycompat.xrange(0, len(csets), batchsize):
740 batch = [x for x in csets[i : i + batchsize]]
742 batch = [x for x in csets[i : i + batchsize]]
741 if not batch:
743 if not batch:
742 continue
744 continue
743
745
744 with remote.commandexecutor() as e:
746 with remote.commandexecutor() as e:
745 args = {
747 args = {
746 b'revisions': [
748 b'revisions': [
747 {b'type': b'changesetexplicit', b'nodes': batch,}
749 {b'type': b'changesetexplicit', b'nodes': batch,}
748 ],
750 ],
749 b'fields': fields,
751 b'fields': fields,
750 b'haveparents': haveparents,
752 b'haveparents': haveparents,
751 }
753 }
752
754
753 if pathfilter:
755 if pathfilter:
754 args[b'pathfilter'] = pathfilter
756 args[b'pathfilter'] = pathfilter
755
757
756 objs = e.callcommand(b'filesdata', args).result()
758 objs = e.callcommand(b'filesdata', args).result()
757
759
758 # First object is an overall header.
760 # First object is an overall header.
759 overall = next(objs)
761 overall = next(objs)
760
762
761 # We have overall['totalpaths'] segments.
763 # We have overall['totalpaths'] segments.
762 for i in pycompat.xrange(overall[b'totalpaths']):
764 for i in pycompat.xrange(overall[b'totalpaths']):
763 header = next(objs)
765 header = next(objs)
764
766
765 path = header[b'path']
767 path = header[b'path']
766 store = repo.file(path)
768 store = repo.file(path)
767
769
768 linkrevs = {
770 linkrevs = {
769 fnode: manlinkrevs[mnode]
771 fnode: manlinkrevs[mnode]
770 for fnode, mnode in pycompat.iteritems(fnodes[path])
772 for fnode, mnode in pycompat.iteritems(fnodes[path])
771 }
773 }
772
774
773 def getlinkrev(node):
775 def getlinkrev(node):
774 if node in linkrevs:
776 if node in linkrevs:
775 return linkrevs[node]
777 return linkrevs[node]
776 else:
778 else:
777 return clrev(node)
779 return clrev(node)
778
780
779 store.addgroup(
781 store.addgroup(
780 iterrevisions(objs, header[b'totalitems'], progress),
782 iterrevisions(objs, header[b'totalitems'], progress),
781 getlinkrev,
783 getlinkrev,
782 weakref.proxy(tr),
784 weakref.proxy(tr),
783 maybemissingparents=shallow,
785 maybemissingparents=shallow,
784 )
786 )
@@ -1,943 +1,936 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License version 2 or any later version.
10 of the GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase are described
21 manipulated and communicated. The details of each phase are described
22 below; here we describe the properties they have in common.
22 below; here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not
24 Like bookmarks, phases are not stored in history and thus are not
25 permanent and leave no audit trail.
25 permanent and leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered,
27 First, no changeset can be in two phases at once. Phases are ordered,
28 so they can be considered from lowest to highest. The default, lowest
28 so they can be considered from lowest to highest. The default, lowest
29 phase is 'public' - this is the normal phase of existing changesets. A
29 phase is 'public' - this is the normal phase of existing changesets. A
30 child changeset cannot be in a lower phase than its parents.
30 child changeset cannot be in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 Local commits are draft by default.
39 Local commits are draft by default.
40
40
41 Phase Movement and Exchange
41 Phase Movement and Exchange
42 ===========================
42 ===========================
43
43
44 Phase data is exchanged by pushkey on pull and push. Some servers have
44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 a publish option set, we call such a server a "publishing server".
45 a publish option set, we call such a server a "publishing server".
46 Pushing a draft changeset to a publishing server changes the phase to
46 Pushing a draft changeset to a publishing server changes the phase to
47 public.
47 public.
48
48
49 A short list of facts/rules defines the exchange of phases:
49 A short list of facts/rules defines the exchange of phases:
50
50
51 * old client never changes server states
51 * old client never changes server states
52 * pull never changes server states
52 * pull never changes server states
53 * publish and old server changesets are seen as public by client
53 * publish and old server changesets are seen as public by client
54 * any secret changeset seen in another repository is lowered to at
54 * any secret changeset seen in another repository is lowered to at
55 least draft
55 least draft
56
56
57 Here is the final table summing up the 49 possible use cases of phase
57 Here is the final table summing up the 49 possible use cases of phase
58 exchange:
58 exchange:
59
59
60 server
60 server
61 old publish non-publish
61 old publish non-publish
62 N X N D P N D P
62 N X N D P N D P
63 old client
63 old client
64 pull
64 pull
65 N - X/X - X/D X/P - X/D X/P
65 N - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
66 X - X/X - X/D X/P - X/D X/P
67 push
67 push
68 X X/X X/X X/P X/P X/P X/D X/D X/P
68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 new client
69 new client
70 pull
70 pull
71 N - P/X - P/D P/P - D/D P/P
71 N - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
72 D - P/X - P/D P/P - D/D P/P
73 P - P/X - P/D P/P - P/D P/P
73 P - P/X - P/D P/P - P/D P/P
74 push
74 push
75 D P/X P/X P/P P/P P/P D/D D/D P/P
75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
76 P P/X P/X P/P P/P P/P P/P P/P P/P
77
77
78 Legend:
78 Legend:
79
79
80 A/B = final state on client / state on server
80 A/B = final state on client / state on server
81
81
82 * N = new/not present,
82 * N = new/not present,
83 * P = public,
83 * P = public,
84 * D = draft,
84 * D = draft,
85 * X = not tracked (i.e., the old client or server has no internal
85 * X = not tracked (i.e., the old client or server has no internal
86 way of recording the phase.)
86 way of recording the phase.)
87
87
88 passive = only pushes
88 passive = only pushes
89
89
90
90
91 A cell here can be read like this:
91 A cell here can be read like this:
92
92
93 "When a new client pushes a draft changeset (D) to a publishing
93 "When a new client pushes a draft changeset (D) to a publishing
94 server where it's not present (N), it's marked public on both
94 server where it's not present (N), it's marked public on both
95 sides (P/P)."
95 sides (P/P)."
96
96
97 Note: old clients behave as a publishing server with draft-only content
97 Note: old clients behave as a publishing server with draft-only content
98 - other people see it as public
98 - other people see it as public
99 - content is pushed as draft
99 - content is pushed as draft
100
100
101 """
101 """
102
102
103 from __future__ import absolute_import
103 from __future__ import absolute_import
104
104
105 import errno
105 import errno
106 import struct
106 import struct
107
107
108 from .i18n import _
108 from .i18n import _
109 from .node import (
109 from .node import (
110 bin,
110 bin,
111 hex,
111 hex,
112 nullid,
112 nullid,
113 nullrev,
113 nullrev,
114 short,
114 short,
115 wdirrev,
115 wdirrev,
116 )
116 )
117 from .pycompat import (
117 from .pycompat import (
118 getattr,
118 getattr,
119 setattr,
119 setattr,
120 )
120 )
121 from . import (
121 from . import (
122 error,
122 error,
123 pycompat,
123 pycompat,
124 requirements,
124 requirements,
125 smartset,
125 smartset,
126 txnutil,
126 txnutil,
127 util,
127 util,
128 )
128 )
129
129
130 _fphasesentry = struct.Struct(b'>i20s')
130 _fphasesentry = struct.Struct(b'>i20s')
131
131
132 # record phase index
132 # record phase index
133 public, draft, secret = range(3)
133 public, draft, secret = range(3)
134 archived = 32 # non-continuous for compatibility
134 archived = 32 # non-continuous for compatibility
135 internal = 96 # non-continuous for compatibility
135 internal = 96 # non-continuous for compatibility
136 allphases = (public, draft, secret, archived, internal)
136 allphases = (public, draft, secret, archived, internal)
137 trackedphases = (draft, secret, archived, internal)
137 trackedphases = (draft, secret, archived, internal)
138 # record phase names
138 # record phase names
139 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
139 cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
140 phasenames = dict(enumerate(cmdphasenames))
140 phasenames = dict(enumerate(cmdphasenames))
141 phasenames[archived] = b'archived'
141 phasenames[archived] = b'archived'
142 phasenames[internal] = b'internal'
142 phasenames[internal] = b'internal'
143 # map phase name to phase number
143 # map phase name to phase number
144 phasenumber = {name: phase for phase, name in phasenames.items()}
144 phasenumber = {name: phase for phase, name in phasenames.items()}
145 # like phasenumber, but also map the numeric and bytestring forms of
145 # like phasenumber, but also map the numeric and bytestring forms of
146 # the phase number to the phase number
146 # the phase number to the phase number
147 phasenumber2 = phasenumber.copy()
147 phasenumber2 = phasenumber.copy()
148 phasenumber2.update({phase: phase for phase in phasenames})
148 phasenumber2.update({phase: phase for phase in phasenames})
149 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
149 phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
150 # record phase property
150 # record phase property
151 mutablephases = (draft, secret, archived, internal)
151 mutablephases = (draft, secret, archived, internal)
152 remotehiddenphases = (secret, archived, internal)
152 remotehiddenphases = (secret, archived, internal)
153 localhiddenphases = (internal, archived)
153 localhiddenphases = (internal, archived)
154
154
155
155
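# A self-contained worked example of the lookup tables above:
# phasenumber2 resolves a phase given as a name, an int, or the int's
# bytestring form, all to the same number.
public, draft, secret = range(3)
archived, internal = 32, 96
phasenames = dict(enumerate([b'public', b'draft', b'secret']))
phasenames[archived] = b'archived'
phasenames[internal] = b'internal'
phasenumber = {name: phase for phase, name in phasenames.items()}
phasenumber2 = phasenumber.copy()
phasenumber2.update({phase: phase for phase in phasenames})
phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
assert phasenumber2[b'draft'] == phasenumber2[1] == phasenumber2[b'1'] == draft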
156 def supportinternal(repo):
156 def supportinternal(repo):
157 """True if the internal phase can be used on a repository"""
157 """True if the internal phase can be used on a repository"""
158 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
158 return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements
159
159
160
160
161 def _readroots(repo, phasedefaults=None):
161 def _readroots(repo, phasedefaults=None):
162 """Read phase roots from disk
162 """Read phase roots from disk
163
163
164 phasedefaults is a list of fn(repo, roots) callables, which are
164 phasedefaults is a list of fn(repo, roots) callables, which are
165 executed if the phase roots file does not exist. When phases are
165 executed if the phase roots file does not exist. When phases are
166 being initialized on an existing repository, this could be used to
166 being initialized on an existing repository, this could be used to
167 set the phase of selected changesets to something other than public.
167 set the phase of selected changesets to something other than public.
168
168
169 Return (roots, dirty) where dirty is true if roots differ from
169 Return (roots, dirty) where dirty is true if roots differ from
170 what is being stored.
170 what is being stored.
171 """
171 """
172 repo = repo.unfiltered()
172 repo = repo.unfiltered()
173 dirty = False
173 dirty = False
174 roots = {i: set() for i in allphases}
174 roots = {i: set() for i in allphases}
175 try:
175 try:
176 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
176 f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
177 try:
177 try:
178 for line in f:
178 for line in f:
179 phase, nh = line.split()
179 phase, nh = line.split()
180 roots[int(phase)].add(bin(nh))
180 roots[int(phase)].add(bin(nh))
181 finally:
181 finally:
182 f.close()
182 f.close()
183 except IOError as inst:
183 except IOError as inst:
184 if inst.errno != errno.ENOENT:
184 if inst.errno != errno.ENOENT:
185 raise
185 raise
186 if phasedefaults:
186 if phasedefaults:
187 for f in phasedefaults:
187 for f in phasedefaults:
188 roots = f(repo, roots)
188 roots = f(repo, roots)
189 dirty = True
189 dirty = True
190 return roots, dirty
190 return roots, dirty
191
191
192
192
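# A self-contained sketch of the on-disk 'phaseroots' format parsed
# above: one "<phase> <hex node>" pair per line (nodes invented;
# binascii.unhexlify stands in for node.bin).
from binascii import unhexlify

sample = b'1 ' + b'11' * 20 + b'\n2 ' + b'22' * 20 + b'\n'
roots = {i: set() for i in (0, 1, 2, 32, 96)}   # allphases
for line in sample.splitlines():
    phase, nh = line.split()
    roots[int(phase)].add(unhexlify(nh))
assert roots[1] == {b'\x11' * 20}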
193 def binaryencode(phasemapping):
193 def binaryencode(phasemapping):
194 """encode a 'phase -> nodes' mapping into a binary stream
194 """encode a 'phase -> nodes' mapping into a binary stream
195
195
196 The revision lists are encoded as (phase, root) pairs.
196 The revision lists are encoded as (phase, root) pairs.
197 """
197 """
198 binarydata = []
198 binarydata = []
199 for phase, nodes in pycompat.iteritems(phasemapping):
199 for phase, nodes in pycompat.iteritems(phasemapping):
200 for head in nodes:
200 for head in nodes:
201 binarydata.append(_fphasesentry.pack(phase, head))
201 binarydata.append(_fphasesentry.pack(phase, head))
202 return b''.join(binarydata)
202 return b''.join(binarydata)
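
# Hedged sketch (not in the upstream file): each entry packed above uses
# _fphasesentry, defined earlier in this module as a fixed-size struct of
# a big-endian integer phase followed by the 20-byte binary node, so a
# mapping like {draft: [node]} round-trips through binarydecode() below:
#
#   data = binaryencode({draft: [b'\x12' * 20]})
#   assert binarydecode(util.stringio(data))[draft] == [b'\x12' * 20]
#
# util.stringio here stands in for any file-like object wrapping bytes.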


def binarydecode(stream):
    """decode a binary stream into a 'phase -> nodes' mapping

    The (phase, root) pairs are turned back into a dictionary with
    the phase as index and the aggregated roots of that phase as value."""
    headsbyphase = {i: [] for i in allphases}
    entrysize = _fphasesentry.size
    while True:
        entry = stream.read(entrysize)
        if len(entry) < entrysize:
            if entry:
                raise error.Abort(_(b'bad phase-heads stream'))
            break
        phase, node = _fphasesentry.unpack(entry)
        headsbyphase[phase].append(node)
    return headsbyphase


def _sortedrange_insert(data, idx, rev, t):
    merge_before = False
    if idx:
        r1, t1 = data[idx - 1]
        merge_before = r1[-1] + 1 == rev and t1 == t
    merge_after = False
    if idx < len(data):
        r2, t2 = data[idx]
        merge_after = r2[0] == rev + 1 and t2 == t

    if merge_before and merge_after:
        data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t)
        data.pop(idx)
    elif merge_before:
        data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t)
    elif merge_after:
        data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t)
    else:
        data.insert(idx, (pycompat.xrange(rev, rev + 1), t))


def _sortedrange_split(data, idx, rev, t):
    r1, t1 = data[idx]
    if t == t1:
        return
    t = (t1[0], t[1])
    if len(r1) == 1:
        data.pop(idx)
        _sortedrange_insert(data, idx, rev, t)
    elif r1[0] == rev:
        data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1)
        _sortedrange_insert(data, idx, rev, t)
    elif r1[-1] == rev:
        data[idx] = (pycompat.xrange(r1[0], rev), t1)
        _sortedrange_insert(data, idx + 1, rev, t)
    else:
        data[idx : idx + 1] = [
            (pycompat.xrange(r1[0], rev), t1),
            (pycompat.xrange(rev, rev + 1), t),
            (pycompat.xrange(rev + 1, r1[-1] + 1), t1),
        ]


def _trackphasechange(data, rev, old, new):
    """add a phase move to the <data> list of ranges

    If data is None, nothing happens.
    """
    if data is None:
        return

    # If data is empty, create a one-revision range and we are done
    if not data:
        data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new)))
        return

    low = 0
    high = len(data)
    t = (old, new)
    while low < high:
        mid = (low + high) // 2
        revs = data[mid][0]
        revs_low = revs[0]
        revs_high = revs[-1]

        if rev >= revs_low and rev <= revs_high:
            _sortedrange_split(data, mid, rev, t)
            return

        if revs_low == rev + 1:
            if mid and data[mid - 1][0][-1] == rev:
                _sortedrange_split(data, mid - 1, rev, t)
            else:
                _sortedrange_insert(data, mid, rev, t)
            return

        if revs_high == rev - 1:
            if mid + 1 < len(data) and data[mid + 1][0][0] == rev:
                _sortedrange_split(data, mid + 1, rev, t)
            else:
                _sortedrange_insert(data, mid + 1, rev, t)
            return

        if revs_low > rev:
            high = mid
        else:
            low = mid + 1

    if low == len(data):
        data.append((pycompat.xrange(rev, rev + 1), t))
        return

    r1, t1 = data[low]
    if r1[0] > rev:
        data.insert(low, (pycompat.xrange(rev, rev + 1), t))
    else:
        data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t))
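
# Worked example (illustrative, not from the upstream source): the tracked
# data is a sorted list of (revision-range, (oldphase, newphase)) pairs,
# and adjacent moves with the same phase pair are coalesced. Starting from
#
#   data = [(pycompat.xrange(0, 2), (None, 1))]
#
# a call to _trackphasechange(data, 2, None, 1) merges rev 2 into the
# neighbouring range, leaving data == [(pycompat.xrange(0, 3), (None, 1))].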


class phasecache(object):
    def __init__(self, repo, phasedefaults, _load=True):
        if _load:
            # Cheap trick to allow shallow-copy without copy module
            self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
            self._loadedrevslen = 0
            self._phasesets = None
            self.filterunknown(repo)
            self.opener = repo.svfs

    def hasnonpublicphases(self, repo):
        """detect if there are revisions with non-public phase"""
        repo = repo.unfiltered()
        cl = repo.changelog
        if len(cl) >= self._loadedrevslen:
            self.invalidate()
            self.loadphaserevs(repo)
        return any(
            revs
            for phase, revs in pycompat.iteritems(self.phaseroots)
            if phase != public
        )

    def nonpublicphaseroots(self, repo):
        """returns the roots of all non-public phases

        The roots are not minimized, so if the secret revisions are
        descendants of draft revisions, their roots will still be present.
        """
        repo = repo.unfiltered()
        cl = repo.changelog
        if len(cl) >= self._loadedrevslen:
            self.invalidate()
            self.loadphaserevs(repo)
        return set().union(
            *[
                revs
                for phase, revs in pycompat.iteritems(self.phaseroots)
                if phase != public
            ]
        )

    def getrevset(self, repo, phases, subset=None):
        """return a smartset for the given phases"""
        self.loadphaserevs(repo)  # ensure phase's sets are loaded
        phases = set(phases)
        publicphase = public in phases

        if publicphase:
            # In this case, phases keeps all the *other* phases.
            phases = set(allphases).difference(phases)
            if not phases:
                return smartset.fullreposet(repo)

        # fast path: _phasesets contains the interesting sets,
        # might only need a union and post-filtering.
        revsneedscopy = False
        if len(phases) == 1:
            [p] = phases
            revs = self._phasesets[p]
            revsneedscopy = True  # Don't modify _phasesets
        else:
            # revs has the revisions in all *other* phases.
            revs = set.union(*[self._phasesets[p] for p in phases])

        def _addwdir(wdirsubset, wdirrevs):
            if wdirrev in wdirsubset and repo[None].phase() in phases:
                if revsneedscopy:
                    wdirrevs = wdirrevs.copy()
                # The working dir would never be in the cache, but it was in
                # the subset being filtered for its phase (or filtered out,
                # depending on publicphase), so add it to the output to be
                # included (or filtered out).
                wdirrevs.add(wdirrev)
            return wdirrevs

        if not publicphase:
            if repo.changelog.filteredrevs:
                revs = revs - repo.changelog.filteredrevs

            if subset is None:
                return smartset.baseset(revs)
            else:
                revs = _addwdir(subset, revs)
                return subset & smartset.baseset(revs)
        else:
            if subset is None:
                subset = smartset.fullreposet(repo)

            revs = _addwdir(subset, revs)

            if not revs:
                return subset
            return subset.filter(lambda r: r not in revs)
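
    # Hedged sketch (not in the upstream file): the public phase is
    # handled by complement, since only non-public sets are materialized.
    # Asking for public revisions:
    #
    #   repo._phasecache.getrevset(repo, (public,))
    #
    # is answered by taking the full repo set and filtering out the union
    # of every tracked (non-public) phase set, rather than by looking up
    # a stored set of public revisions.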

    def copy(self):
        # Shallow copy meant to ensure isolation in
        # advance/retractboundary(), nothing more.
        ph = self.__class__(None, None, _load=False)
        ph.phaseroots = self.phaseroots.copy()
        ph.dirty = self.dirty
        ph.opener = self.opener
        ph._loadedrevslen = self._loadedrevslen
        ph._phasesets = self._phasesets
        return ph

    def replace(self, phcache):
        """replace all values in 'self' with content of phcache"""
        for a in (
            b'phaseroots',
            b'dirty',
            b'opener',
            b'_loadedrevslen',
            b'_phasesets',
        ):
            setattr(self, a, getattr(phcache, a))

    def _getphaserevsnative(self, repo):
        repo = repo.unfiltered()
        return repo.changelog.computephases(self.phaseroots)

    def _computephaserevspure(self, repo):
        repo = repo.unfiltered()
        cl = repo.changelog
        self._phasesets = {phase: set() for phase in allphases}
        lowerroots = set()
        for phase in reversed(trackedphases):
            roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
            if roots:
                ps = set(cl.descendants(roots))
                for root in roots:
                    ps.add(root)
                ps.difference_update(lowerroots)
                lowerroots.update(ps)
                self._phasesets[phase] = ps
        self._loadedrevslen = len(cl)
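
    # Illustrative note (not in the upstream file): phases are computed
    # from roots by descendant propagation, walking from the highest
    # tracked phase down. A revision already claimed by a higher phase
    # (e.g. secret) is removed from lower ones via lowerroots, so each
    # revision ends up in exactly one set; anything left unclaimed is
    # public.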

    def loadphaserevs(self, repo):
        """ensure phase information is loaded in the object"""
        if self._phasesets is None:
            try:
                res = self._getphaserevsnative(repo)
                self._loadedrevslen, self._phasesets = res
            except AttributeError:
                self._computephaserevspure(repo)

    def invalidate(self):
        self._loadedrevslen = 0
        self._phasesets = None

    def phase(self, repo, rev):
        # We need a repo argument here to be able to build _phasesets
        # if necessary. The repository instance is not stored in
        # phasecache to avoid reference cycles. The changelog instance
        # is not stored because it is a filecache() property and can
        # be replaced without us being notified.
        if rev == nullrev:
            return public
        if rev < nullrev:
            raise ValueError(_(b'cannot lookup negative revision'))
        if rev >= self._loadedrevslen:
            self.invalidate()
            self.loadphaserevs(repo)
        for phase in trackedphases:
            if rev in self._phasesets[phase]:
                return phase
        return public

    def write(self):
        if not self.dirty:
            return
        f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True)
        try:
            self._write(f)
        finally:
            f.close()

    def _write(self, fp):
        for phase, roots in pycompat.iteritems(self.phaseroots):
            for h in sorted(roots):
                fp.write(b'%i %s\n' % (phase, hex(h)))
        self.dirty = False

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

        tr.addfilegenerator(b'phase', (b'phaseroots',), self._write)
        tr.hookargs[b'phases_moved'] = b'1'

    def registernew(self, repo, tr, targetphase, revs):
        repo = repo.unfiltered()
        self._retractboundary(repo, tr, targetphase, [], revs=revs)
        if tr is not None and b'phases' in tr.changes:
            phasetracking = tr.changes[b'phases']
            phase = self.phase
            for rev in sorted(revs):
                revphase = phase(repo, rev)
                _trackphasechange(phasetracking, rev, None, revphase)
        repo.invalidatevolatilesets()
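
    # Usage note (hedged, not in the upstream file): with this signature,
    # callers pass an iterable of revision numbers rather than node ids,
    # e.g. something along the lines of:
    #
    #   repo._phasecache.registernew(repo, tr, draft, {ctx.rev()})
    #
    # where ctx is a changectx for a freshly added commit.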

    def advanceboundary(
        self, repo, tr, targetphase, nodes, revs=None, dryrun=None
    ):
        """Set all 'nodes' to phase 'targetphase'

        Nodes with a phase lower than 'targetphase' are not affected.

        If dryrun is True, no actions will be performed.

        Returns a set of revs whose phase is changed or should be changed.
        """
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.
        if revs is None:
            revs = []
        if tr is None:
            phasetracking = None
        else:
            phasetracking = tr.changes.get(b'phases')

        repo = repo.unfiltered()
        revs = [repo[n].rev() for n in nodes] + [r for r in revs]

        changes = set()  # set of revisions to be changed
        delroots = []  # set of roots deleted by this pass
        for phase in (phase for phase in allphases if phase > targetphase):
            # filter nodes that are not in a compatible phase already
            revs = [rev for rev in revs if self.phase(repo, rev) >= phase]
            if not revs:
                break  # no roots to move anymore

            olds = self.phaseroots[phase]

            affected = repo.revs(b'%ln::%ld', olds, revs)
            changes.update(affected)
            if dryrun:
                continue
            for r in affected:
                _trackphasechange(
                    phasetracking, r, self.phase(repo, r), targetphase
                )

            roots = {
                ctx.node()
                for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected)
            }
            if olds != roots:
                self._updateroots(phase, roots, tr)
                # some roots may need to be declared for lower phases
                delroots.extend(olds - roots)
        if not dryrun:
            # declare deleted roots in the target phase
            if targetphase != 0:
                self._retractboundary(repo, tr, targetphase, delroots)
        repo.invalidatevolatilesets()
        return changes

    def retractboundary(self, repo, tr, targetphase, nodes):
        oldroots = {
            phase: revs
            for phase, revs in pycompat.iteritems(self.phaseroots)
            if phase <= targetphase
        }
        if tr is None:
            phasetracking = None
        else:
            phasetracking = tr.changes.get(b'phases')
        repo = repo.unfiltered()
        if (
            self._retractboundary(repo, tr, targetphase, nodes)
            and phasetracking is not None
        ):

            # find the affected revisions
            new = self.phaseroots[targetphase]
            old = oldroots[targetphase]
            affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))

            # find the phase of the affected revisions
            for phase in pycompat.xrange(targetphase, -1, -1):
                if phase:
                    roots = oldroots.get(phase, [])
                    revs = set(repo.revs(b'%ln::%ld', roots, affected))
                    affected -= revs
                else:  # public phase
                    revs = affected
                for r in sorted(revs):
                    _trackphasechange(phasetracking, r, phase, targetphase)
        repo.invalidatevolatilesets()

    def _retractboundary(self, repo, tr, targetphase, nodes, revs=None):
        # Be careful to preserve shallow-copied values: do not update
        # phaseroots values, replace them.
        if revs is None:
            revs = []
        if targetphase in (archived, internal) and not supportinternal(repo):
            name = phasenames[targetphase]
            msg = b'this repository does not support the %s phase' % name
            raise error.ProgrammingError(msg)

        repo = repo.unfiltered()
        torev = repo.changelog.rev
        tonode = repo.changelog.node
        currentroots = {torev(node) for node in self.phaseroots[targetphase]}
        finalroots = oldroots = set(currentroots)
        newroots = [torev(node) for node in nodes] + [r for r in revs]
        newroots = [
            rev for rev in newroots if self.phase(repo, rev) < targetphase
        ]

        if newroots:
            if nullrev in newroots:
                raise error.Abort(_(b'cannot change null revision phase'))
            currentroots.update(newroots)

            # Only compute new roots for revs above the roots that are being
            # retracted.
            minnewroot = min(newroots)
            aboveroots = [rev for rev in currentroots if rev >= minnewroot]
            updatedroots = repo.revs(b'roots(%ld::)', aboveroots)

            finalroots = {rev for rev in currentroots if rev < minnewroot}
            finalroots.update(updatedroots)
        if finalroots != oldroots:
            self._updateroots(
                targetphase, {tonode(rev) for rev in finalroots}, tr
            )
            return True
        return False
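
    # Illustrative note (not in the upstream file): retracting recomputes
    # the minimal root set only at or above the smallest new root. If the
    # draft roots are {2, 5} and rev 3 is retracted to draft, roots(x::)
    # is evaluated over {3, 5}, collapsing any root that became a
    # descendant of another, while lower roots (here rev 2) are carried
    # over untouched.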

    def filterunknown(self, repo):
        """remove unknown nodes from the phase boundary

        Nothing is lost as unknown nodes only hold data for their descendants.
        """
        filtered = False
        has_node = repo.changelog.index.has_node  # to filter unknown nodes
        for phase, nodes in pycompat.iteritems(self.phaseroots):
            missing = sorted(node for node in nodes if not has_node(node))
            if missing:
                for mnode in missing:
                    repo.ui.debug(
                        b'removing unknown node %s from %i-phase boundary\n'
                        % (short(mnode), phase)
                    )
                nodes.symmetric_difference_update(missing)
                filtered = True
        if filtered:
            self.dirty = True
        # filterunknown is called by repo.destroyed; we may have no changes
        # in the roots, but the _phasesets contents are certainly invalid
        # (or at least we have no proper way to check that). Related to
        # issue 3858.
        #
        # The other caller is __init__, which has no _phasesets initialized
        # anyway. If this changes, we should consider adding a dedicated
        # "destroyed" function to phasecache or a proper cache key mechanism
        # (see the branchmap one).
        self.invalidate()


def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to the
    target phase or kept in a *lower* phase.

    It simplifies the boundary to contain phase roots only.

    If dryrun is True, no actions will be performed.

    Returns a set of revs whose phase is changed or should be changed.
    """
    if revs is None:
        revs = []
    phcache = repo._phasecache.copy()
    changes = phcache.advanceboundary(
        repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun
    )
    if not dryrun:
        repo._phasecache.replace(phcache)
    return changes


def retractboundary(repo, tr, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if
    necessary.

    This function moves the boundary *backward*: all nodes are set to the
    target phase or kept in a *higher* phase.

    It simplifies the boundary to contain phase roots only."""
    phcache = repo._phasecache.copy()
    phcache.retractboundary(repo, tr, targetphase, nodes)
    repo._phasecache.replace(phcache)


def registernew(repo, tr, targetphase, revs):
    """register new revisions and their phase

    Code adding revisions to the repository should use this function to
    set new changesets in their target phase (or higher).
    """
    phcache = repo._phasecache.copy()
    phcache.registernew(repo, tr, targetphase, revs)
    repo._phasecache.replace(phcache)


def listphases(repo):
    """List phase roots for serialization over pushkey"""
    # Use an ordered dictionary so behavior is deterministic.
    keys = util.sortdict()
    value = b'%i' % draft
    cl = repo.unfiltered().changelog
    for root in repo._phasecache.phaseroots[draft]:
        if repo._phasecache.phase(repo, cl.rev(root)) <= draft:
            keys[hex(root)] = value

    if repo.publishing():
        # Add an extra data entry to let the remote know we are a
        # publishing repo. Publishing repos can't just pretend they are
        # old repos. When pushing to a publishing repo, the client still
        # needs to push the phase boundary.
        #
        # Push does not only push changesets. It also pushes phase data.
        # New phase data may apply to common changesets which won't be
        # pushed (as they are common). Here is a very simple example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data that
        #    X is now public should be.
        #
        # The server can't handle it on its own as it has no idea of
        # client phase data.
        keys[b'publishing'] = b'True'
    return keys
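
# Hedged example (not in the upstream file): the pushkey listing built
# above maps draft root hashes to b'1', plus the publishing marker, e.g.
# roughly:
#
#   {b'<40-hex-draft-root>': b'1', b'publishing': b'True'}
#
# which is the kind of mapping `hg debugpushkey <path> phases` reports for
# a publishing repository with draft roots.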


def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Advance the phase of a single node over pushkey; returns True on
    success (or on a race that reached the same result), False otherwise."""
    repo = repo.unfiltered()
    with repo.lock():
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr))  # let's avoid negative index surprise
        oldphase = abs(int(oldphasestr))  # let's avoid negative index surprise
        if currentphase == oldphase and newphase < oldphase:
            with repo.transaction(b'pushkey-phase') as tr:
                advanceboundary(repo, tr, newphase, [bin(nhex)])
            return True
        elif currentphase == newphase:
            # raced, but got correct result
            return True
        else:
            return False


def subsetphaseheads(repo, subset):
    """Finds the phase heads for a subset of a history

    Returns a mapping indexed by phase number where each item is a list of
    phase head nodes.
    """
    cl = repo.changelog

    headsbyphase = {i: [] for i in allphases}
    # No need to keep track of secret phase; any heads in the subset that
    # are not mentioned are implicitly secret.
    for phase in allphases[:secret]:
        revset = b"heads(%%ln & %s())" % phasenames[phase]
        headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)]
    return headsbyphase


def updatephases(repo, trgetter, headsbyphase):
    """Updates the repo with the given phase heads"""
    # Now advance phase boundaries of all phases
    #
    # run the update (and fetch the transaction) only if there are actually
    # things to update. This avoids creating an empty transaction during
    # no-op operations.

    for phase in allphases:
        revset = b'%ln - _phase(%s)'
        heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
        if heads:
            advanceboundary(repo, trgetter(), phase, heads)


def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a root dict

    * subset is heads of the subset
    * roots is a {<nodeid> => phase} mapping. keys and values are strings.

    Accepts unknown elements in the input.
    """
    repo = repo.unfiltered()
    # build list from dictionary
    draftroots = []
    has_node = repo.changelog.index.has_node  # to filter unknown nodes
    for nhex, phase in pycompat.iteritems(roots):
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
        if phase == public:
            if node != nullid:
                repo.ui.warn(
                    _(
                        b'ignoring inconsistent public root'
                        b' from remote: %s\n'
                    )
                    % nhex
                )
        elif phase == draft:
            if has_node(node):
                draftroots.append(node)
        else:
            repo.ui.warn(
                _(b'ignoring unexpected root from remote: %i %s\n')
                % (phase, nhex)
            )
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots


class remotephasessummary(object):
    """summarize phase information on the remote side

    :publishing: True if the remote is publishing
    :publicheads: list of remote public phase heads (nodes)
    :draftheads: list of remote draft phase heads (nodes)
    :draftroots: list of remote draft phase roots (nodes)
    """

    def __init__(self, repo, remotesubset, remoteroots):
        unfi = repo.unfiltered()
        self._allremoteroots = remoteroots

        self.publishing = remoteroots.get(b'publishing', False)

        ana = analyzeremotephases(repo, remotesubset, remoteroots)
        self.publicheads, self.draftroots = ana
        # Get the list of all "heads" revs draft on remote
        dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
        self.draftheads = [c.node() for c in dheads]


def newheads(repo, heads, roots):
    """compute the new heads of a subset minus another

    * `heads`: defines the first subset
    * `roots`: defines the second, which we subtract from the first"""
    # prevent an import cycle
    # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
    from . import dagop

    repo = repo.unfiltered()
    cl = repo.changelog
    rev = cl.index.get_rev
    if not roots:
        return heads
    if not heads or heads == [nullid]:
        return []
    # The logic operates on revisions; convert the arguments early for
    # convenience
    new_heads = {rev(n) for n in heads if n != nullid}
    roots = [rev(n) for n in roots]
    # compute the area we need to remove
    affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
    # heads in the area are no longer heads
    new_heads.difference_update(affected_zone)
    # revisions in the area that have children outside of it
    # might be new heads
    candidates = repo.revs(
        b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone
    )
    candidates -= affected_zone
    if new_heads or candidates:
        # remove candidates that are ancestors of other heads
        new_heads.update(candidates)
        prunestart = repo.revs(b"parents(%ld) and not null", new_heads)
        pruned = dagop.reachableroots(repo, candidates, prunestart)
        new_heads.difference_update(pruned)

    return pycompat.maplist(cl.node, sorted(new_heads))


def newcommitphase(ui):
    """helper to get the target phase of a new commit

    Handles all possible values for the phases.new-commit option.

    """
    v = ui.config(b'phases', b'new-commit')
    try:
        return phasenumber2[v]
    except KeyError:
        raise error.ConfigError(
            _(b"phases.new-commit: not a valid phase name ('%s')") % v
        )
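
# Hedged example (not in the upstream file): the phases.new-commit option
# read above is the usual way to make new commits secret by default:
#
#   [phases]
#   new-commit = secret
#
# Any key of phasenumber2 is accepted, so a phase number such as '2'
# works as well as the name 'secret'.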


def hassecret(repo):
    """utility function that checks if a repo has any secret changesets."""
    return bool(repo._phasecache.phaseroots[secret])


def preparehookargs(node, old, new):
    if old is None:
        old = b''
    else:
        old = phasenames[old]
    return {b'node': node, b'oldphase': old, b'phase': phasenames[new]}
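
# Illustrative note (not in the upstream file): these arguments feed the
# txnclose-phase hooks, which receive them as HG_NODE, HG_OLDPHASE and
# HG_PHASE in their environment; an empty HG_OLDPHASE marks a freshly
# added revision.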