##// END OF EJS Templates
obsutil: move 'allsuccessors' to the new modules...
marmoute -
r33146:0a370b93 default
parent child Browse files
Show More
@@ -1,1512 +1,1513 b''
1 # rebase.py - rebasing feature for mercurial
1 # rebase.py - rebasing feature for mercurial
2 #
2 #
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to move sets of revisions to a different ancestor
8 '''command to move sets of revisions to a different ancestor
9
9
10 This extension lets you rebase changesets in an existing Mercurial
10 This extension lets you rebase changesets in an existing Mercurial
11 repository.
11 repository.
12
12
13 For more information:
13 For more information:
14 https://mercurial-scm.org/wiki/RebaseExtension
14 https://mercurial-scm.org/wiki/RebaseExtension
15 '''
15 '''
16
16
17 from __future__ import absolute_import
17 from __future__ import absolute_import
18
18
19 import errno
19 import errno
20 import os
20 import os
21
21
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import (
23 from mercurial.node import (
24 hex,
24 hex,
25 nullid,
25 nullid,
26 nullrev,
26 nullrev,
27 short,
27 short,
28 )
28 )
29 from mercurial import (
29 from mercurial import (
30 bookmarks,
30 bookmarks,
31 cmdutil,
31 cmdutil,
32 commands,
32 commands,
33 copies,
33 copies,
34 destutil,
34 destutil,
35 dirstateguard,
35 dirstateguard,
36 error,
36 error,
37 extensions,
37 extensions,
38 hg,
38 hg,
39 lock,
39 lock,
40 merge as mergemod,
40 merge as mergemod,
41 mergeutil,
41 mergeutil,
42 obsolete,
42 obsolete,
43 obsutil,
43 patch,
44 patch,
44 phases,
45 phases,
45 registrar,
46 registrar,
46 repair,
47 repair,
47 repoview,
48 repoview,
48 revset,
49 revset,
49 scmutil,
50 scmutil,
50 smartset,
51 smartset,
51 util,
52 util,
52 )
53 )
53
54
54 release = lock.release
55 release = lock.release
55 templateopts = cmdutil.templateopts
56 templateopts = cmdutil.templateopts
56
57
57 # The following constants are used throughout the rebase module. The ordering of
58 # The following constants are used throughout the rebase module. The ordering of
58 # their values must be maintained.
59 # their values must be maintained.
59
60
60 # Indicates that a revision needs to be rebased
61 # Indicates that a revision needs to be rebased
61 revtodo = -1
62 revtodo = -1
62 nullmerge = -2
63 nullmerge = -2
63 revignored = -3
64 revignored = -3
64 # successor in rebase destination
65 # successor in rebase destination
65 revprecursor = -4
66 revprecursor = -4
66 # plain prune (no successor)
67 # plain prune (no successor)
67 revpruned = -5
68 revpruned = -5
68 revskipped = (revignored, revprecursor, revpruned)
69 revskipped = (revignored, revprecursor, revpruned)
69
70
70 cmdtable = {}
71 cmdtable = {}
71 command = registrar.command(cmdtable)
72 command = registrar.command(cmdtable)
72 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
73 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
73 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
74 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
74 # be specifying the version(s) of Mercurial they are tested with, or
75 # be specifying the version(s) of Mercurial they are tested with, or
75 # leave the attribute unspecified.
76 # leave the attribute unspecified.
76 testedwith = 'ships-with-hg-core'
77 testedwith = 'ships-with-hg-core'
77
78
78 def _nothingtorebase():
79 def _nothingtorebase():
79 return 1
80 return 1
80
81
81 def _savegraft(ctx, extra):
82 def _savegraft(ctx, extra):
82 s = ctx.extra().get('source', None)
83 s = ctx.extra().get('source', None)
83 if s is not None:
84 if s is not None:
84 extra['source'] = s
85 extra['source'] = s
85 s = ctx.extra().get('intermediate-source', None)
86 s = ctx.extra().get('intermediate-source', None)
86 if s is not None:
87 if s is not None:
87 extra['intermediate-source'] = s
88 extra['intermediate-source'] = s
88
89
89 def _savebranch(ctx, extra):
90 def _savebranch(ctx, extra):
90 extra['branch'] = ctx.branch()
91 extra['branch'] = ctx.branch()
91
92
92 def _makeextrafn(copiers):
93 def _makeextrafn(copiers):
93 """make an extrafn out of the given copy-functions.
94 """make an extrafn out of the given copy-functions.
94
95
95 A copy function takes a context and an extra dict, and mutates the
96 A copy function takes a context and an extra dict, and mutates the
96 extra dict as needed based on the given context.
97 extra dict as needed based on the given context.
97 """
98 """
98 def extrafn(ctx, extra):
99 def extrafn(ctx, extra):
99 for c in copiers:
100 for c in copiers:
100 c(ctx, extra)
101 c(ctx, extra)
101 return extrafn
102 return extrafn
102
103
103 def _destrebase(repo, sourceset, destspace=None):
104 def _destrebase(repo, sourceset, destspace=None):
104 """small wrapper around destmerge to pass the right extra args
105 """small wrapper around destmerge to pass the right extra args
105
106
106 Please wrap destutil.destmerge instead."""
107 Please wrap destutil.destmerge instead."""
107 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
108 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
108 onheadcheck=False, destspace=destspace)
109 onheadcheck=False, destspace=destspace)
109
110
110 revsetpredicate = registrar.revsetpredicate()
111 revsetpredicate = registrar.revsetpredicate()
111
112
112 @revsetpredicate('_destrebase')
113 @revsetpredicate('_destrebase')
113 def _revsetdestrebase(repo, subset, x):
114 def _revsetdestrebase(repo, subset, x):
114 # ``_rebasedefaultdest()``
115 # ``_rebasedefaultdest()``
115
116
116 # default destination for rebase.
117 # default destination for rebase.
117 # # XXX: Currently private because I expect the signature to change.
118 # # XXX: Currently private because I expect the signature to change.
118 # # XXX: - bailing out in case of ambiguity vs returning all data.
119 # # XXX: - bailing out in case of ambiguity vs returning all data.
119 # i18n: "_rebasedefaultdest" is a keyword
120 # i18n: "_rebasedefaultdest" is a keyword
120 sourceset = None
121 sourceset = None
121 if x is not None:
122 if x is not None:
122 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
123 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
123 return subset & smartset.baseset([_destrebase(repo, sourceset)])
124 return subset & smartset.baseset([_destrebase(repo, sourceset)])
124
125
125 class rebaseruntime(object):
126 class rebaseruntime(object):
126 """This class is a container for rebase runtime state"""
127 """This class is a container for rebase runtime state"""
127 def __init__(self, repo, ui, opts=None):
128 def __init__(self, repo, ui, opts=None):
128 if opts is None:
129 if opts is None:
129 opts = {}
130 opts = {}
130
131
131 self.repo = repo
132 self.repo = repo
132 self.ui = ui
133 self.ui = ui
133 self.opts = opts
134 self.opts = opts
134 self.originalwd = None
135 self.originalwd = None
135 self.external = nullrev
136 self.external = nullrev
136 # Mapping between the old revision id and either what is the new rebased
137 # Mapping between the old revision id and either what is the new rebased
137 # revision or what needs to be done with the old revision. The state
138 # revision or what needs to be done with the old revision. The state
138 # dict will be what contains most of the rebase progress state.
139 # dict will be what contains most of the rebase progress state.
139 self.state = {}
140 self.state = {}
140 self.activebookmark = None
141 self.activebookmark = None
141 self.currentbookmarks = None
142 self.currentbookmarks = None
142 self.dest = None
143 self.dest = None
143 self.skipped = set()
144 self.skipped = set()
144 self.destancestors = set()
145 self.destancestors = set()
145
146
146 self.collapsef = opts.get('collapse', False)
147 self.collapsef = opts.get('collapse', False)
147 self.collapsemsg = cmdutil.logmessage(ui, opts)
148 self.collapsemsg = cmdutil.logmessage(ui, opts)
148 self.date = opts.get('date', None)
149 self.date = opts.get('date', None)
149
150
150 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
151 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
151 self.extrafns = [_savegraft]
152 self.extrafns = [_savegraft]
152 if e:
153 if e:
153 self.extrafns = [e]
154 self.extrafns = [e]
154
155
155 self.keepf = opts.get('keep', False)
156 self.keepf = opts.get('keep', False)
156 self.keepbranchesf = opts.get('keepbranches', False)
157 self.keepbranchesf = opts.get('keepbranches', False)
157 # keepopen is not meant for use on the command line, but by
158 # keepopen is not meant for use on the command line, but by
158 # other extensions
159 # other extensions
159 self.keepopen = opts.get('keepopen', False)
160 self.keepopen = opts.get('keepopen', False)
160 self.obsoletenotrebased = {}
161 self.obsoletenotrebased = {}
161
162
162 def storestatus(self, tr=None):
163 def storestatus(self, tr=None):
163 """Store the current status to allow recovery"""
164 """Store the current status to allow recovery"""
164 if tr:
165 if tr:
165 tr.addfilegenerator('rebasestate', ('rebasestate',),
166 tr.addfilegenerator('rebasestate', ('rebasestate',),
166 self._writestatus, location='plain')
167 self._writestatus, location='plain')
167 else:
168 else:
168 with self.repo.vfs("rebasestate", "w") as f:
169 with self.repo.vfs("rebasestate", "w") as f:
169 self._writestatus(f)
170 self._writestatus(f)
170
171
171 def _writestatus(self, f):
172 def _writestatus(self, f):
172 repo = self.repo.unfiltered()
173 repo = self.repo.unfiltered()
173 f.write(repo[self.originalwd].hex() + '\n')
174 f.write(repo[self.originalwd].hex() + '\n')
174 f.write(repo[self.dest].hex() + '\n')
175 f.write(repo[self.dest].hex() + '\n')
175 f.write(repo[self.external].hex() + '\n')
176 f.write(repo[self.external].hex() + '\n')
176 f.write('%d\n' % int(self.collapsef))
177 f.write('%d\n' % int(self.collapsef))
177 f.write('%d\n' % int(self.keepf))
178 f.write('%d\n' % int(self.keepf))
178 f.write('%d\n' % int(self.keepbranchesf))
179 f.write('%d\n' % int(self.keepbranchesf))
179 f.write('%s\n' % (self.activebookmark or ''))
180 f.write('%s\n' % (self.activebookmark or ''))
180 for d, v in self.state.iteritems():
181 for d, v in self.state.iteritems():
181 oldrev = repo[d].hex()
182 oldrev = repo[d].hex()
182 if v >= 0:
183 if v >= 0:
183 newrev = repo[v].hex()
184 newrev = repo[v].hex()
184 elif v == revtodo:
185 elif v == revtodo:
185 # To maintain format compatibility, we have to use nullid.
186 # To maintain format compatibility, we have to use nullid.
186 # Please do remove this special case when upgrading the format.
187 # Please do remove this special case when upgrading the format.
187 newrev = hex(nullid)
188 newrev = hex(nullid)
188 else:
189 else:
189 newrev = v
190 newrev = v
190 f.write("%s:%s\n" % (oldrev, newrev))
191 f.write("%s:%s\n" % (oldrev, newrev))
191 repo.ui.debug('rebase status stored\n')
192 repo.ui.debug('rebase status stored\n')
192
193
193 def restorestatus(self):
194 def restorestatus(self):
194 """Restore a previously stored status"""
195 """Restore a previously stored status"""
195 repo = self.repo
196 repo = self.repo
196 keepbranches = None
197 keepbranches = None
197 dest = None
198 dest = None
198 collapse = False
199 collapse = False
199 external = nullrev
200 external = nullrev
200 activebookmark = None
201 activebookmark = None
201 state = {}
202 state = {}
202
203
203 try:
204 try:
204 f = repo.vfs("rebasestate")
205 f = repo.vfs("rebasestate")
205 for i, l in enumerate(f.read().splitlines()):
206 for i, l in enumerate(f.read().splitlines()):
206 if i == 0:
207 if i == 0:
207 originalwd = repo[l].rev()
208 originalwd = repo[l].rev()
208 elif i == 1:
209 elif i == 1:
209 dest = repo[l].rev()
210 dest = repo[l].rev()
210 elif i == 2:
211 elif i == 2:
211 external = repo[l].rev()
212 external = repo[l].rev()
212 elif i == 3:
213 elif i == 3:
213 collapse = bool(int(l))
214 collapse = bool(int(l))
214 elif i == 4:
215 elif i == 4:
215 keep = bool(int(l))
216 keep = bool(int(l))
216 elif i == 5:
217 elif i == 5:
217 keepbranches = bool(int(l))
218 keepbranches = bool(int(l))
218 elif i == 6 and not (len(l) == 81 and ':' in l):
219 elif i == 6 and not (len(l) == 81 and ':' in l):
219 # line 6 is a recent addition, so for backwards
220 # line 6 is a recent addition, so for backwards
220 # compatibility check that the line doesn't look like the
221 # compatibility check that the line doesn't look like the
221 # oldrev:newrev lines
222 # oldrev:newrev lines
222 activebookmark = l
223 activebookmark = l
223 else:
224 else:
224 oldrev, newrev = l.split(':')
225 oldrev, newrev = l.split(':')
225 if newrev in (str(nullmerge), str(revignored),
226 if newrev in (str(nullmerge), str(revignored),
226 str(revprecursor), str(revpruned)):
227 str(revprecursor), str(revpruned)):
227 state[repo[oldrev].rev()] = int(newrev)
228 state[repo[oldrev].rev()] = int(newrev)
228 elif newrev == nullid:
229 elif newrev == nullid:
229 state[repo[oldrev].rev()] = revtodo
230 state[repo[oldrev].rev()] = revtodo
230 # Legacy compat special case
231 # Legacy compat special case
231 else:
232 else:
232 state[repo[oldrev].rev()] = repo[newrev].rev()
233 state[repo[oldrev].rev()] = repo[newrev].rev()
233
234
234 except IOError as err:
235 except IOError as err:
235 if err.errno != errno.ENOENT:
236 if err.errno != errno.ENOENT:
236 raise
237 raise
237 cmdutil.wrongtooltocontinue(repo, _('rebase'))
238 cmdutil.wrongtooltocontinue(repo, _('rebase'))
238
239
239 if keepbranches is None:
240 if keepbranches is None:
240 raise error.Abort(_('.hg/rebasestate is incomplete'))
241 raise error.Abort(_('.hg/rebasestate is incomplete'))
241
242
242 skipped = set()
243 skipped = set()
243 # recompute the set of skipped revs
244 # recompute the set of skipped revs
244 if not collapse:
245 if not collapse:
245 seen = {dest}
246 seen = {dest}
246 for old, new in sorted(state.items()):
247 for old, new in sorted(state.items()):
247 if new != revtodo and new in seen:
248 if new != revtodo and new in seen:
248 skipped.add(old)
249 skipped.add(old)
249 seen.add(new)
250 seen.add(new)
250 repo.ui.debug('computed skipped revs: %s\n' %
251 repo.ui.debug('computed skipped revs: %s\n' %
251 (' '.join(str(r) for r in sorted(skipped)) or None))
252 (' '.join(str(r) for r in sorted(skipped)) or None))
252 repo.ui.debug('rebase status resumed\n')
253 repo.ui.debug('rebase status resumed\n')
253 _setrebasesetvisibility(repo, set(state.keys()) | {originalwd})
254 _setrebasesetvisibility(repo, set(state.keys()) | {originalwd})
254
255
255 self.originalwd = originalwd
256 self.originalwd = originalwd
256 self.dest = dest
257 self.dest = dest
257 self.state = state
258 self.state = state
258 self.skipped = skipped
259 self.skipped = skipped
259 self.collapsef = collapse
260 self.collapsef = collapse
260 self.keepf = keep
261 self.keepf = keep
261 self.keepbranchesf = keepbranches
262 self.keepbranchesf = keepbranches
262 self.external = external
263 self.external = external
263 self.activebookmark = activebookmark
264 self.activebookmark = activebookmark
264
265
265 def _handleskippingobsolete(self, rebaserevs, obsoleterevs, dest):
266 def _handleskippingobsolete(self, rebaserevs, obsoleterevs, dest):
266 """Compute structures necessary for skipping obsolete revisions
267 """Compute structures necessary for skipping obsolete revisions
267
268
268 rebaserevs: iterable of all revisions that are to be rebased
269 rebaserevs: iterable of all revisions that are to be rebased
269 obsoleterevs: iterable of all obsolete revisions in rebaseset
270 obsoleterevs: iterable of all obsolete revisions in rebaseset
270 dest: a destination revision for the rebase operation
271 dest: a destination revision for the rebase operation
271 """
272 """
272 self.obsoletenotrebased = {}
273 self.obsoletenotrebased = {}
273 if not self.ui.configbool('experimental', 'rebaseskipobsolete',
274 if not self.ui.configbool('experimental', 'rebaseskipobsolete',
274 default=True):
275 default=True):
275 return
276 return
276 rebaseset = set(rebaserevs)
277 rebaseset = set(rebaserevs)
277 obsoleteset = set(obsoleterevs)
278 obsoleteset = set(obsoleterevs)
278 self.obsoletenotrebased = _computeobsoletenotrebased(self.repo,
279 self.obsoletenotrebased = _computeobsoletenotrebased(self.repo,
279 obsoleteset, dest)
280 obsoleteset, dest)
280 skippedset = set(self.obsoletenotrebased)
281 skippedset = set(self.obsoletenotrebased)
281 _checkobsrebase(self.repo, self.ui, obsoleteset, rebaseset, skippedset)
282 _checkobsrebase(self.repo, self.ui, obsoleteset, rebaseset, skippedset)
282
283
283 def _prepareabortorcontinue(self, isabort):
284 def _prepareabortorcontinue(self, isabort):
284 try:
285 try:
285 self.restorestatus()
286 self.restorestatus()
286 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
287 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
287 except error.RepoLookupError:
288 except error.RepoLookupError:
288 if isabort:
289 if isabort:
289 clearstatus(self.repo)
290 clearstatus(self.repo)
290 clearcollapsemsg(self.repo)
291 clearcollapsemsg(self.repo)
291 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
292 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
292 ' only broken state is cleared)\n'))
293 ' only broken state is cleared)\n'))
293 return 0
294 return 0
294 else:
295 else:
295 msg = _('cannot continue inconsistent rebase')
296 msg = _('cannot continue inconsistent rebase')
296 hint = _('use "hg rebase --abort" to clear broken state')
297 hint = _('use "hg rebase --abort" to clear broken state')
297 raise error.Abort(msg, hint=hint)
298 raise error.Abort(msg, hint=hint)
298 if isabort:
299 if isabort:
299 return abort(self.repo, self.originalwd, self.dest,
300 return abort(self.repo, self.originalwd, self.dest,
300 self.state, activebookmark=self.activebookmark)
301 self.state, activebookmark=self.activebookmark)
301
302
302 obsrevs = (r for r, st in self.state.items() if st == revprecursor)
303 obsrevs = (r for r, st in self.state.items() if st == revprecursor)
303 self._handleskippingobsolete(self.state.keys(), obsrevs, self.dest)
304 self._handleskippingobsolete(self.state.keys(), obsrevs, self.dest)
304
305
305 def _preparenewrebase(self, dest, rebaseset):
306 def _preparenewrebase(self, dest, rebaseset):
306 if dest is None:
307 if dest is None:
307 return _nothingtorebase()
308 return _nothingtorebase()
308
309
309 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
310 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
310 if (not (self.keepf or allowunstable)
311 if (not (self.keepf or allowunstable)
311 and self.repo.revs('first(children(%ld) - %ld)',
312 and self.repo.revs('first(children(%ld) - %ld)',
312 rebaseset, rebaseset)):
313 rebaseset, rebaseset)):
313 raise error.Abort(
314 raise error.Abort(
314 _("can't remove original changesets with"
315 _("can't remove original changesets with"
315 " unrebased descendants"),
316 " unrebased descendants"),
316 hint=_('use --keep to keep original changesets'))
317 hint=_('use --keep to keep original changesets'))
317
318
318 obsrevs = _filterobsoleterevs(self.repo, set(rebaseset))
319 obsrevs = _filterobsoleterevs(self.repo, set(rebaseset))
319 self._handleskippingobsolete(rebaseset, obsrevs, dest)
320 self._handleskippingobsolete(rebaseset, obsrevs, dest)
320
321
321 result = buildstate(self.repo, dest, rebaseset, self.collapsef,
322 result = buildstate(self.repo, dest, rebaseset, self.collapsef,
322 self.obsoletenotrebased)
323 self.obsoletenotrebased)
323
324
324 if not result:
325 if not result:
325 # Empty state built, nothing to rebase
326 # Empty state built, nothing to rebase
326 self.ui.status(_('nothing to rebase\n'))
327 self.ui.status(_('nothing to rebase\n'))
327 return _nothingtorebase()
328 return _nothingtorebase()
328
329
329 for root in self.repo.set('roots(%ld)', rebaseset):
330 for root in self.repo.set('roots(%ld)', rebaseset):
330 if not self.keepf and not root.mutable():
331 if not self.keepf and not root.mutable():
331 raise error.Abort(_("can't rebase public changeset %s")
332 raise error.Abort(_("can't rebase public changeset %s")
332 % root,
333 % root,
333 hint=_("see 'hg help phases' for details"))
334 hint=_("see 'hg help phases' for details"))
334
335
335 (self.originalwd, self.dest, self.state) = result
336 (self.originalwd, self.dest, self.state) = result
336 if self.collapsef:
337 if self.collapsef:
337 self.destancestors = self.repo.changelog.ancestors(
338 self.destancestors = self.repo.changelog.ancestors(
338 [self.dest],
339 [self.dest],
339 inclusive=True)
340 inclusive=True)
340 self.external = externalparent(self.repo, self.state,
341 self.external = externalparent(self.repo, self.state,
341 self.destancestors)
342 self.destancestors)
342
343
343 if dest.closesbranch() and not self.keepbranchesf:
344 if dest.closesbranch() and not self.keepbranchesf:
344 self.ui.status(_('reopening closed branch head %s\n') % dest)
345 self.ui.status(_('reopening closed branch head %s\n') % dest)
345
346
346 def _performrebase(self):
347 def _performrebase(self):
347 repo, ui, opts = self.repo, self.ui, self.opts
348 repo, ui, opts = self.repo, self.ui, self.opts
348 if self.keepbranchesf:
349 if self.keepbranchesf:
349 # insert _savebranch at the start of extrafns so if
350 # insert _savebranch at the start of extrafns so if
350 # there's a user-provided extrafn it can clobber branch if
351 # there's a user-provided extrafn it can clobber branch if
351 # desired
352 # desired
352 self.extrafns.insert(0, _savebranch)
353 self.extrafns.insert(0, _savebranch)
353 if self.collapsef:
354 if self.collapsef:
354 branches = set()
355 branches = set()
355 for rev in self.state:
356 for rev in self.state:
356 branches.add(repo[rev].branch())
357 branches.add(repo[rev].branch())
357 if len(branches) > 1:
358 if len(branches) > 1:
358 raise error.Abort(_('cannot collapse multiple named '
359 raise error.Abort(_('cannot collapse multiple named '
359 'branches'))
360 'branches'))
360
361
361 # Rebase
362 # Rebase
362 if not self.destancestors:
363 if not self.destancestors:
363 self.destancestors = repo.changelog.ancestors([self.dest],
364 self.destancestors = repo.changelog.ancestors([self.dest],
364 inclusive=True)
365 inclusive=True)
365
366
366 # Keep track of the current bookmarks in order to reset them later
367 # Keep track of the current bookmarks in order to reset them later
367 self.currentbookmarks = repo._bookmarks.copy()
368 self.currentbookmarks = repo._bookmarks.copy()
368 self.activebookmark = self.activebookmark or repo._activebookmark
369 self.activebookmark = self.activebookmark or repo._activebookmark
369 if self.activebookmark:
370 if self.activebookmark:
370 bookmarks.deactivate(repo)
371 bookmarks.deactivate(repo)
371
372
372 # Store the state before we begin so users can run 'hg rebase --abort'
373 # Store the state before we begin so users can run 'hg rebase --abort'
373 # if we fail before the transaction closes.
374 # if we fail before the transaction closes.
374 self.storestatus()
375 self.storestatus()
375
376
376 sortedrevs = repo.revs('sort(%ld, -topo)', self.state)
377 sortedrevs = repo.revs('sort(%ld, -topo)', self.state)
377 cands = [k for k, v in self.state.iteritems() if v == revtodo]
378 cands = [k for k, v in self.state.iteritems() if v == revtodo]
378 total = len(cands)
379 total = len(cands)
379 pos = 0
380 pos = 0
380 for rev in sortedrevs:
381 for rev in sortedrevs:
381 ctx = repo[rev]
382 ctx = repo[rev]
382 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
383 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
383 ctx.description().split('\n', 1)[0])
384 ctx.description().split('\n', 1)[0])
384 names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
385 names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
385 if names:
386 if names:
386 desc += ' (%s)' % ' '.join(names)
387 desc += ' (%s)' % ' '.join(names)
387 if self.state[rev] == rev:
388 if self.state[rev] == rev:
388 ui.status(_('already rebased %s\n') % desc)
389 ui.status(_('already rebased %s\n') % desc)
389 elif self.state[rev] == revtodo:
390 elif self.state[rev] == revtodo:
390 pos += 1
391 pos += 1
391 ui.status(_('rebasing %s\n') % desc)
392 ui.status(_('rebasing %s\n') % desc)
392 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
393 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
393 _('changesets'), total)
394 _('changesets'), total)
394 p1, p2, base = defineparents(repo, rev, self.dest,
395 p1, p2, base = defineparents(repo, rev, self.dest,
395 self.state,
396 self.state,
396 self.destancestors,
397 self.destancestors,
397 self.obsoletenotrebased)
398 self.obsoletenotrebased)
398 self.storestatus()
399 self.storestatus()
399 storecollapsemsg(repo, self.collapsemsg)
400 storecollapsemsg(repo, self.collapsemsg)
400 if len(repo[None].parents()) == 2:
401 if len(repo[None].parents()) == 2:
401 repo.ui.debug('resuming interrupted rebase\n')
402 repo.ui.debug('resuming interrupted rebase\n')
402 else:
403 else:
403 try:
404 try:
404 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
405 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
405 'rebase')
406 'rebase')
406 stats = rebasenode(repo, rev, p1, base, self.state,
407 stats = rebasenode(repo, rev, p1, base, self.state,
407 self.collapsef, self.dest)
408 self.collapsef, self.dest)
408 if stats and stats[3] > 0:
409 if stats and stats[3] > 0:
409 raise error.InterventionRequired(
410 raise error.InterventionRequired(
410 _('unresolved conflicts (see hg '
411 _('unresolved conflicts (see hg '
411 'resolve, then hg rebase --continue)'))
412 'resolve, then hg rebase --continue)'))
412 finally:
413 finally:
413 ui.setconfig('ui', 'forcemerge', '', 'rebase')
414 ui.setconfig('ui', 'forcemerge', '', 'rebase')
414 if not self.collapsef:
415 if not self.collapsef:
415 merging = p2 != nullrev
416 merging = p2 != nullrev
416 editform = cmdutil.mergeeditform(merging, 'rebase')
417 editform = cmdutil.mergeeditform(merging, 'rebase')
417 editor = cmdutil.getcommiteditor(editform=editform, **opts)
418 editor = cmdutil.getcommiteditor(editform=editform, **opts)
418 newnode = concludenode(repo, rev, p1, p2,
419 newnode = concludenode(repo, rev, p1, p2,
419 extrafn=_makeextrafn(self.extrafns),
420 extrafn=_makeextrafn(self.extrafns),
420 editor=editor,
421 editor=editor,
421 keepbranches=self.keepbranchesf,
422 keepbranches=self.keepbranchesf,
422 date=self.date)
423 date=self.date)
423 if newnode is None:
424 if newnode is None:
424 # If it ended up being a no-op commit, then the normal
425 # If it ended up being a no-op commit, then the normal
425 # merge state clean-up path doesn't happen, so do it
426 # merge state clean-up path doesn't happen, so do it
426 # here. Fix issue5494
427 # here. Fix issue5494
427 mergemod.mergestate.clean(repo)
428 mergemod.mergestate.clean(repo)
428 else:
429 else:
429 # Skip commit if we are collapsing
430 # Skip commit if we are collapsing
430 repo.setparents(repo[p1].node())
431 repo.setparents(repo[p1].node())
431 newnode = None
432 newnode = None
432 # Update the state
433 # Update the state
433 if newnode is not None:
434 if newnode is not None:
434 self.state[rev] = repo[newnode].rev()
435 self.state[rev] = repo[newnode].rev()
435 ui.debug('rebased as %s\n' % short(newnode))
436 ui.debug('rebased as %s\n' % short(newnode))
436 else:
437 else:
437 if not self.collapsef:
438 if not self.collapsef:
438 ui.warn(_('note: rebase of %d:%s created no changes '
439 ui.warn(_('note: rebase of %d:%s created no changes '
439 'to commit\n') % (rev, ctx))
440 'to commit\n') % (rev, ctx))
440 self.skipped.add(rev)
441 self.skipped.add(rev)
441 self.state[rev] = p1
442 self.state[rev] = p1
442 ui.debug('next revision set to %s\n' % p1)
443 ui.debug('next revision set to %s\n' % p1)
443 elif self.state[rev] == nullmerge:
444 elif self.state[rev] == nullmerge:
444 ui.debug('ignoring null merge rebase of %s\n' % rev)
445 ui.debug('ignoring null merge rebase of %s\n' % rev)
445 elif self.state[rev] == revignored:
446 elif self.state[rev] == revignored:
446 ui.status(_('not rebasing ignored %s\n') % desc)
447 ui.status(_('not rebasing ignored %s\n') % desc)
447 elif self.state[rev] == revprecursor:
448 elif self.state[rev] == revprecursor:
448 destctx = repo[self.obsoletenotrebased[rev]]
449 destctx = repo[self.obsoletenotrebased[rev]]
449 descdest = '%d:%s "%s"' % (destctx.rev(), destctx,
450 descdest = '%d:%s "%s"' % (destctx.rev(), destctx,
450 destctx.description().split('\n', 1)[0])
451 destctx.description().split('\n', 1)[0])
451 msg = _('note: not rebasing %s, already in destination as %s\n')
452 msg = _('note: not rebasing %s, already in destination as %s\n')
452 ui.status(msg % (desc, descdest))
453 ui.status(msg % (desc, descdest))
453 elif self.state[rev] == revpruned:
454 elif self.state[rev] == revpruned:
454 msg = _('note: not rebasing %s, it has no successor\n')
455 msg = _('note: not rebasing %s, it has no successor\n')
455 ui.status(msg % desc)
456 ui.status(msg % desc)
456 else:
457 else:
457 ui.status(_('already rebased %s as %s\n') %
458 ui.status(_('already rebased %s as %s\n') %
458 (desc, repo[self.state[rev]]))
459 (desc, repo[self.state[rev]]))
459
460
460 ui.progress(_('rebasing'), None)
461 ui.progress(_('rebasing'), None)
461 ui.note(_('rebase merging completed\n'))
462 ui.note(_('rebase merging completed\n'))
462
463
    def _finishrebase(self):
        """Finalize a rebase whose revisions have all been processed.

        Creates the single collapsed commit when --collapse was used,
        refreshes mq and bookmark state, moves the working directory back
        to its (possibly rebased) original parent, and finally strips or
        obsoletes the pre-rebase changesets unless --keep was requested.
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        if self.collapsef and not self.keepopen:
            p1, p2, _base = defineparents(repo, min(self.state),
                                          self.dest, self.state,
                                          self.destancestors,
                                          self.obsoletenotrebased)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                # No explicit message: synthesize one listing every rebased
                # revision, and force the editor open so the user can adjust
                # the generated text.
                commitmsg = 'Collapsed revision'
                for rebased in self.state:
                    if rebased not in self.skipped and\
                       self.state[rebased] > nullmerge:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            # Reuse user/date/etc. from the highest revision in the set.
            revtoreuse = max(self.state)
            newnode = concludenode(repo, revtoreuse, p1, self.external,
                                   commitmsg=commitmsg,
                                   extrafn=_makeextrafn(self.extrafns),
                                   editor=editor,
                                   keepbranches=self.keepbranchesf,
                                   date=self.date)
            if newnode is None:
                # The collapsed commit was empty; fall back to the
                # destination revision.
                newrev = self.dest
            else:
                newrev = repo[newnode].rev()
            # Point every rebased revision at the single collapsed one.
            for oldrev in self.state.iterkeys():
                if self.state[oldrev] > nullmerge:
                    self.state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **opts)

        if self.currentbookmarks:
            # Nodeids are needed to reset bookmarks
            nstate = {}
            for k, v in self.state.iteritems():
                if v > nullmerge and v != k:
                    nstate[repo[k].node()] = repo[v].node()
                elif v == revprecursor:
                    # Revision was skipped because a successor already exists
                    # in the destination: move the bookmark to that successor.
                    succ = self.obsoletenotrebased[k]
                    nstate[repo[k].node()] = repo[succ].node()
            # XXX this is the same as dest.node() for the non-continue path --
            # this should probably be cleaned up
            destnode = repo[self.dest].node()

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd == revprecursor:
            newwd = self.obsoletenotrebased[self.originalwd]
        elif newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, False)

        if self.currentbookmarks:
            with repo.transaction('bookmark') as tr:
                updatebookmarks(repo, destnode, nstate,
                                self.currentbookmarks, tr)
                if self.activebookmark not in repo._bookmarks:
                    # active bookmark was divergent one and has been deleted
                    self.activebookmark = None

        if not self.keepf:
            collapsedas = None
            if self.collapsef:
                collapsedas = newnode
            clearrebased(ui, repo, self.state, self.skipped, collapsedas)

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_("%d revisions have been skipped\n") % skippedlen)

        if (self.activebookmark and
                repo['.'].node() == repo._bookmarks[self.activebookmark]):
            # Reactivate the bookmark only if the working directory actually
            # sits on it after the rebase.
            bookmarks.activate(repo, self.activebookmark)
551
552
@command('rebase',
    [('s', 'source', '',
     _('rebase the specified changeset and descendants'), _('REV')),
    ('b', 'base', '',
     _('rebase everything from branching point of specified changeset'),
     _('REV')),
    ('r', 'rev', [],
     _('rebase these revisions'),
     _('REV')),
    ('d', 'dest', '',
     _('rebase onto the specified changeset'), _('REV')),
    ('', 'collapse', False, _('collapse the rebased changesets')),
    ('m', 'message', '',
     _('use text as collapse commit message'), _('TEXT')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('l', 'logfile', '',
     _('read collapse commit message from file'), _('FILE')),
    ('k', 'keep', False, _('keep original changesets')),
    ('', 'keepbranches', False, _('keep original branch names')),
    ('D', 'detach', False, _('(DEPRECATED)')),
    ('i', 'interactive', False, _('(DEPRECATED)')),
    ('t', 'tool', '', _('specify merge tool')),
    ('c', 'continue', False, _('continue an interrupted rebase')),
    ('a', 'abort', False, _('abort an interrupted rebase'))] +
    templateopts,
    _('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    Published commits cannot be rebased (see :hg:`help phases`).
    To copy commits, see :hg:`help graft`.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    will use the same logic as :hg:`merge` to pick a destination.  if
    the current branch contains exactly one other head, the other head
    is merged with by default.  Otherwise, an explicit revision with
    which to merge with must be provided.  (destination changeset is not
    modified by rebasing, but new changesets are added as its
    descendants.)

    Here are the ways to select changesets:

      1. Explicitly select them using ``--rev``.

      2. Use ``--source`` to select a root changeset and include all of its
         descendants.

      3. Use ``--base`` to select a changeset; rebase will find ancestors
         and their descendants which are not also ancestors of the destination.

      4. If you do not specify any of ``--rev``, ``source``, or ``--base``,
         rebase will use ``--base .`` as above.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``, see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted, there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c or aborted with --abort/-a.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

    Configuration Options:

    You can make rebase require a destination if you set the following config
    option::

      [commands]
      rebase.requiredest = True

    Return Values:

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.

    """
    rbsrt = rebaseruntime(repo, ui, opts)

    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        contf = opts.get('continue')
        abortf = opts.get('abort')
        if opts.get('interactive'):
            # -i/--interactive is deprecated: point the user at histedit,
            # adding --config only if the extension is not already enabled.
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # Resuming or aborting an interrupted rebase: these modes are
            # mutually exclusive with each other and with any revision
            # selection or collapse options.
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if contf:
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(abortf)
            if retcode is not None:
                return retcode
        else:
            # Fresh rebase: resolve destination and the set of revisions to
            # move from the command-line arguments.
            dest, rebaseset = _definesets(ui, repo, destf, srcf, basef, revf,
                                          destspace=destspace)
            retcode = rbsrt._preparenewrebase(dest, rebaseset)
            if retcode is not None:
                return retcode

        rbsrt._performrebase()
        rbsrt._finishrebase()
727
728
def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=None,
                destspace=None):
    """use revisions argument to define destination and rebase set

    Returns a ``(dest, rebaseset)`` pair, or ``(None, None)`` when there is
    nothing to rebase (an explanatory message is printed in that case).
    Aborts on conflicting selection options, unfinished operations, or a
    dirty working directory.
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details
    if srcf and basef:
        raise error.Abort(_('cannot specify both a source and a base'))
    if revf and basef:
        raise error.Abort(_('cannot specify both a revision and a base'))
    if revf and srcf:
        raise error.Abort(_('cannot specify both a revision and a source'))

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    if ui.configbool('commands', 'rebase.requiredest') and not destf:
        raise error.Abort(_('you must specify a destination'),
                          hint=_('use: hg rebase -d REV'))

    if destf:
        dest = scmutil.revsingle(repo, destf)

    if revf:
        # --rev: take exactly the revisions the user listed.
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
            return None, None
    elif srcf:
        # --source: the given roots plus all of their descendants.
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_('empty "source" revision set - nothing to rebase\n'))
            return None, None
        rebaseset = repo.revs('(%ld)::', src)
        assert rebaseset
    else:
        # --base (defaulting to '.'): rebase the branch(es) containing the
        # base revisions, starting from their branching point(s) off the
        # destination.
        base = scmutil.revrange(repo, [basef or '.'])
        if not base:
            ui.status(_('empty "base" revision set - '
                        "can't compute rebase set\n"))
            return None, None
        if not destf:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = str(dest)

        roots = [] # selected children of branching points
        bpbase = {} # {branchingpoint: [origbase]}
        for b in base: # group bases by branching points
            bp = repo.revs('ancestor(%d, %d)', b, dest).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in bpbase.iteritems(): # calculate roots
            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs('%ld::', roots)

        if not rebaseset:
            # Nothing to move: figure out which situation we are in so the
            # message can explain why.
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(_('nothing to rebase - %s is both "base"'
                                ' and destination\n') % dest)
                else:
                    ui.status(_('nothing to rebase - working directory '
                                'parent is also destination\n'))
            elif not repo.revs('%ld - ::%d', base, dest):
                if basef:
                    ui.status(_('nothing to rebase - "base" %s is '
                                'already an ancestor of destination '
                                '%s\n') %
                              ('+'.join(str(repo[r]) for r in base),
                               dest))
                else:
                    ui.status(_('nothing to rebase - working '
                                'directory parent is already an '
                                'ancestor of destination %s\n') % dest)
            else: # can it happen?
                ui.status(_('nothing to rebase from %s to %s\n') %
                          ('+'.join(str(repo[r]) for r in base), dest))
            return None, None

    if not destf:
        # No explicit destination: derive one from the rebase set itself.
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = str(dest)

    return dest, rebaseset
822
823
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    # The root of the rebase set keeps its own ancestry; every other rebased
    # revision contributes any parent lying outside both the rebase set and
    # the destination's ancestors.
    source = min(state)
    externals = set()
    for rev in state:
        if rev == source:
            continue
        externals.update(p.rev() for p in repo[rev].parents()
                         if p.rev() not in state
                         and p.rev() not in destancestors)
    if not externals:
        return nullrev
    if len(externals) == 1:
        return externals.pop()
    raise error.Abort(_('unable to collapse on top of %s, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join(str(p) for p in sorted(externals))))
845
846
def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
                 keepbranches=False, date=None):
    '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
    but also store useful information in extra.
    Return node of committed revision.'''
    # Guard the dirstate so a failed commit rolls the parents back.
    dsguard = dirstateguard.dirstateguard(repo, 'rebase')
    try:
        repo.setparents(repo[p1].node(), repo[p2].node())
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
        # Record the origin of the new changeset for traceability.
        extra = {'rebase_source': ctx.hex()}
        if extrafn:
            extrafn(ctx, extra)

        # Never demote: the new commit is at least draft, and keeps the
        # original's phase if that one was higher.
        destphase = max(ctx.phase(), phases.draft)
        overrides = {('phases', 'new-commit'): destphase}
        with repo.ui.configoverride(overrides, 'rebase'):
            if keepbranch:
                # Allow an otherwise-empty commit so a pure branch-name
                # change is not silently dropped — presumably; confirm
                # against the commit() implementation.
                repo.ui.setconfig('ui', 'allowemptycommit', True)
            # Commit might fail if unresolved files exist
            if date is None:
                date = ctx.date()
            newnode = repo.commit(text=commitmsg, user=ctx.user(),
                                  date=date, extra=extra, editor=editor)

        repo.dirstate.setbranch(repo[newnode].branch())
        dsguard.close()
        return newnode
    finally:
        release(dsguard)
878
879
def rebasenode(repo, rev, p1, base, state, collapse, dest):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
    # Merge phase
    # Update to destination and merge it with local
    if repo['.'].rev() != p1:
        repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
        mergemod.update(repo, p1, False, True)
    else:
        repo.ui.debug(" already in destination\n")
    # Persist the dirstate before merging so the merge sees the new parent.
    repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(repo, rev, True, True, base, collapse,
                            labels=['dest', 'source'])
    if collapse:
        copies.duplicatecopies(repo, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, rev, p1rev, skiprev=dest)
    return stats
907
908
def nearestrebased(repo, rev, state):
    """return the nearest ancestors of rev in the rebase result"""
    # Only revisions that were actually moved (mapped above the nullmerge
    # sentinel) can stand in as a replacement ancestor.
    rebased = [r for r, newrev in state.iteritems() if newrev > nullmerge]
    candidates = repo.revs('max(%ld and (::%d))', rebased, rev)
    nearest = candidates.first()
    if nearest is None:
        return None
    return state[nearest]
916
917
def _checkobsrebase(repo, ui, rebaseobsrevs, rebasesetrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revision in source
    `rebasesetrevs`: set of revisions to be rebased from source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination
    """
    # An obsolete changeset with a successor that is not in the destination
    # would get a second successor from the rebase: content divergence.
    basecandidates = rebaseobsrevs - rebaseobsskipped
    if not basecandidates:
        return
    if ui.configbool('experimental', 'allowdivergence'):
        # the user explicitly opted in to divergence
        return
    offenders = ",".join(str(repo[r]) for r in basecandidates)
    raise error.Abort(
        _("this rebase will cause divergences from: %s") % (offenders,),
        hint=_("to force the rebase please set "
               "experimental.allowdivergence=True"))
def defineparents(repo, rev, dest, state, destancestors,
                  obsoletenotrebased):
    """Return the new parent relationship of the revision that will be rebased

    Returns a tuple ``(p1, p2, base)`` of revision numbers:
    - ``p1``/``p2``: parents the rebased copy of ``rev`` will get
    - ``base``: merge base to use when rebasing ``rev``, or None to let the
      merge mechanism find it itself

    ``state`` maps source revisions to their rebased revision or to one of
    the module-level special markers (e.g. nullmerge, revprecursor).
    ``obsoletenotrebased`` maps skipped obsolete revisions to their
    successor revision.
    """
    parents = repo[rev].parents()
    # rp1, when set, overrides p1 (used when p1 resolves to the successor
    # of an obsolete changeset recorded in obsoletenotrebased)
    p1 = p2 = nullrev
    rp1 = None

    p1n = parents[0].rev()
    if p1n in destancestors:
        p1 = dest
    elif p1n in state:
        if state[p1n] == nullmerge:
            # p1 was detached away by the rebase: reparent onto dest
            p1 = dest
        elif state[p1n] in revskipped:
            # p1 was skipped: fall back to its nearest rebased ancestor
            p1 = nearestrebased(repo, p1n, state)
            if p1 is None:
                p1 = dest
        else:
            p1 = state[p1n]
    else: # p1n external
        p1 = dest
        p2 = p1n

    if len(parents) == 2 and parents[1].rev() not in destancestors:
        p2n = parents[1].rev()
        # interesting second parent
        if p2n in state:
            if p1 == dest: # p1n in destancestors or external
                p1 = state[p2n]
                if p1 == revprecursor:
                    rp1 = obsoletenotrebased[p2n]
            elif state[p2n] in revskipped:
                p2 = nearestrebased(repo, p2n, state)
                if p2 is None:
                    # no ancestors rebased yet, detach
                    p2 = dest
            else:
                p2 = state[p2n]
        else: # p2n external
            if p2 != nullrev: # p1n external too => rev is a merged revision
                raise error.Abort(_('cannot use revision %d as base, result '
                                    'would have 3 parents') % rev)
            p2 = p2n
    repo.ui.debug(" future parents are %d and %d\n" %
                  (repo[rp1 or p1].rev(), repo[p2].rev()))

    if not any(p.rev() in state for p in parents):
        # Case (1) root changeset of a non-detaching rebase set.
        # Let the merge mechanism find the base itself.
        base = None
    elif not repo[rev].p2():
        # Case (2) detaching the node with a single parent, use this parent
        base = repo[rev].p1().rev()
    else:
        # Assuming there is a p1, this is the case where there also is a p2.
        # We are thus rebasing a merge and need to pick the right merge base.
        #
        # Imagine we have:
        # - M: current rebase revision in this step
        # - A: one parent of M
        # - B: other parent of M
        # - D: destination of this merge step (p1 var)
        #
        # Consider the case where D is a descendant of A or B and the other is
        # 'outside'. In this case, the right merge base is the D ancestor.
        #
        # An informal proof, assuming A is 'outside' and B is the D ancestor:
        #
        # If we pick B as the base, the merge involves:
        # - changes from B to M (actual changeset payload)
        # - changes from B to D (induced by rebase) as D is a rebased
        #   version of B)
        # Which exactly represent the rebase operation.
        #
        # If we pick A as the base, the merge involves:
        # - changes from A to M (actual changeset payload)
        # - changes from A to D (with include changes between unrelated A and B
        #   plus changes induced by rebase)
        # Which does not represent anything sensible and creates a lot of
        # conflicts. A is thus not the right choice - B is.
        #
        # Note: The base found in this 'proof' is only correct in the specified
        # case. This base does not make sense if is not D a descendant of A or B
        # or if the other is not parent 'outside' (especially not if the other
        # parent has been rebased). The current implementation does not
        # make it feasible to consider different cases separately. In these
        # other cases we currently just leave it to the user to correctly
        # resolve an impossible merge using a wrong ancestor.
        #
        # xx, p1 could be -4, and both parents could probably be -4...
        for p in repo[rev].parents():
            if state.get(p.rev()) == p1:
                base = p.rev()
                break
        else: # fallback when base not found
            base = None

            # Raise because this function is called wrong (see issue 4106)
            raise AssertionError('no base found to rebase on '
                                 '(defineparents called wrong)')
    return rp1 or p1, p2, base
def isagitpatch(repo, patchname):
    """Return true if the given patch is in git format

    A patch qualifies as soon as any of its lines starts with the
    'diff --git' header.
    """
    mqpatch = os.path.join(repo.mq.path, patchname)
    # open() instead of the Python-2-only file() builtin, and close the
    # handle deterministically even when the header is found early
    # (the original leaked the file object).
    fp = open(mqpatch, 'rb')
    try:
        for line in patch.linereader(fp):
            if line.startswith('diff --git'):
                return True
        return False
    finally:
        fp.close()
def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them

    Applied mq patches whose revision was rebased are finalized (turned into
    regular changesets) and then re-imported on top of their rebased
    revision, so the patch queue keeps tracking the rebased versions.
    Patches that were applied but skipped or not rebased are dropped from
    the series.
    """
    mqrebase = {}               # rev -> (patch name, is git-format patch)
    mq = repo.mq
    original_series = mq.fullseries[:]  # snapshot to preserve order/guards
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %s:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=[str(state[rev])])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()
def updatebookmarks(repo, destnode, nstate, originalbookmarks, tr):
    """Move bookmarks to their correct changesets, and delete divergent ones"""
    bmarks = repo._bookmarks
    for name, oldnode in originalbookmarks.iteritems():
        if oldnode not in nstate:
            continue
        # this bookmark's node was rebased: retarget it to the new node
        bmarks[name] = nstate[oldnode]
        bookmarks.deletedivergent(repo, [destnode], name)
    bmarks.recordchange(tr)
def storecollapsemsg(repo, collapsemsg):
    """Store the collapse message to allow recovery

    The message is written to .hg/last-message.txt; a None/empty message is
    stored as an empty line.
    """
    f = repo.vfs("last-message.txt", "w")
    # try/finally so the handle is not leaked if the write raises
    # (the original closed it only on the success path)
    try:
        f.write("%s\n" % (collapsemsg or ''))
    finally:
        f.close()
def clearcollapsemsg(repo):
    """Remove the stored collapse message file, if any."""
    # ignoremissing: nothing to do when no message was ever stored
    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)
def restorecollapsemsg(repo, isabort):
    """Restore previously stored collapse message

    Missing message file is tolerated when aborting (returns ''), but is an
    error when continuing a rebase.
    """
    collapsemsg = ''
    try:
        fp = repo.vfs("last-message.txt")
        collapsemsg = fp.readline().strip()
        fp.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if not isabort:
            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
        # Oh well, just abort like normal (message stays empty)
    return collapsemsg
def clearstatus(repo):
    """Remove the rebase status files."""
    _clearrebasesetvisibiliy(repo)
    # An active transaction may have registered a generator that would
    # re-create the state file when it closes; drop it before unlinking.
    curtr = repo.currenttransaction()
    if curtr:
        curtr.removefilegenerator('rebasestate')
    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)
def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    wdparents = [ctx.rev() for ctx in repo[None].parents()]

    # Not in a merge state at all: nothing to update away from.
    if len(wdparents) < 2:
        return False

    # A merge is in progress; it belongs to this rebase only if we are
    # standing on the first as-of-yet unrebased commit.
    unrebased = [old for old, new in state.iteritems() if new == nullrev]
    return min(unrebased) in wdparents
def abort(repo, originalwd, dest, state, activebookmark=None):
    '''Restore the repository to its original state. Additional args:

    activebookmark: the name of the bookmark that should be active after the
    restore'''

    try:
        # If the first commits in the rebased set get skipped during the rebase,
        # their values within the state mapping will be the dest rev id. The
        # dstates list must not contain the dest rev (issue4896)
        dstates = [s for s in state.values() if s >= 0 and s != dest]
        immutable = [d for d in dstates if not repo[d].mutable()]
        cleanup = True
        if immutable:
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_("see 'hg help phases' for details"))
            cleanup = False

        descendants = set()
        if dstates:
            descendants = set(repo.changelog.descendants(dstates))
        if descendants - set(dstates):
            repo.ui.warn(_("warning: new changesets detected on destination "
                           "branch, can't strip\n"))
            cleanup = False

        if cleanup:
            shouldupdate = False
            # A list comprehension rather than filter(): under Python 3
            # filter() returns a lazy, always-truthy object, which would
            # break the `if rebased:` tests below. (In Python 2 this is
            # behaviorally identical.)
            rebased = [s for s in state.values() if s >= 0 and s != dest]
            if rebased:
                strippoints = [
                        c.node() for c in repo.set('roots(%ld)', rebased)]

                updateifonnodes = set(rebased)
                updateifonnodes.add(dest)
                updateifonnodes.add(originalwd)
                shouldupdate = repo['.'].rev() in updateifonnodes

            # Update away from the rebase if necessary
            if shouldupdate or needupdate(repo, state):
                mergemod.update(repo, originalwd, False, True)

            # Strip from the first rebased revision
            if rebased:
                # no backup of rebased cset versions needed
                repair.strip(repo.ui, repo, strippoints)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        clearcollapsemsg(repo)
        repo.ui.warn(_('rebase aborted\n'))
    return 0
def buildstate(repo, dest, rebaseset, collapse, obsoletenotrebased):
    '''Define which revisions are going to be rebased and where

    repo: repo
    dest: context
    rebaseset: set of rev

    Returns None when there is nothing to rebase, otherwise a tuple
    (originalwd, destrev, state) where state maps each relevant source
    revision to revtodo or to one of the special marker values
    (nullmerge, revignored, revpruned, revprecursor).
    '''
    originalwd = repo['.'].rev()
    _setrebasesetvisibility(repo, set(rebaseset) | {originalwd})

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if 'qtip' in repo.tags() and (dest.node() in
                                  [s.node for s in repo.mq.applied]):
        raise error.Abort(_('cannot rebase onto an applied mq patch'))

    roots = list(repo.set('roots(%ld)', rebaseset))
    if not roots:
        raise error.Abort(_('no matching revisions'))
    roots.sort()
    # every revision in the rebase set starts out as "to do"
    state = dict.fromkeys(rebaseset, revtodo)
    detachset = set()
    emptyrebase = True
    for root in roots:
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_('source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revisions
                state[root.rev()] = root.rev()
                repo.ui.debug('source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
        # Rebase tries to turn <dest> into a parent of <root> while
        # preserving the number of parents of rebased changesets:
        #
        # - A changeset with a single parent will always be rebased as a
        #   changeset with a single parent.
        #
        # - A merge will be rebased as merge unless its parents are both
        #   ancestors of <dest> or are themselves in the rebased set and
        #   pruned while rebased.
        #
        # If one parent of <root> is an ancestor of <dest>, the rebased
        # version of this parent will be <dest>. This is always true with
        # --base option.
        #
        # Otherwise, we need to *replace* the original parents with
        # <dest>. This "detaches" the rebased set from its former location
        # and rebases it onto <dest>. Changes introduced by ancestors of
        # <root> not common with <dest> (the detachset, marked as
        # nullmerge) are "removed" from the rebased changesets.
        #
        # - If <root> has a single parent, set it to <dest>.
        #
        # - If <root> is a merge, we cannot decide which parent to
        #   replace, the rebase operation is not clearly defined.
        #
        # The table below sums up this behavior:
        #
        # +------------------+----------------------+-------------------------+
        # |                  |     one parent       |  merge                  |
        # +------------------+----------------------+-------------------------+
        # | parent in        | new parent is <dest> | parents in ::<dest> are |
        # | ::<dest>         |                      | remapped to <dest>      |
        # +------------------+----------------------+-------------------------+
        # | unrelated source | new parent is <dest> | ambiguous, abort        |
        # +------------------+----------------------+-------------------------+
        #
        # The actual abort is handled by `defineparents`
        if len(root.parents()) <= 1:
            # ancestors of <root> not ancestors of <dest>
            detachset.update(repo.changelog.findmissingrevs([commonbase.rev()],
                                                            [root.rev()]))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    for r in detachset:
        if r not in state:
            state[r] = nullmerge
    if len(roots) > 1:
        # If we have multiple roots, we may have "hole" in the rebase set.
        # Rebase roots that descend from those "hole" should not be detached as
        # other root are. We use the special `revignored` to inform rebase that
        # the revision should be ignored but that `defineparents` should search
        # a rebase destination that make sense regarding rebased topology.
        rebasedomain = set(repo.revs('%ld::%ld', rebaseset, rebaseset))
        for ignored in set(rebasedomain) - set(rebaseset):
            state[ignored] = revignored
    # obsolete revisions skipped from the rebase: either pruned (no
    # successor) or superseded by a successor already in the destination
    for r in obsoletenotrebased:
        if obsoletenotrebased[r] is None:
            state[r] = revpruned
        else:
            state[r] = revprecursor
    return originalwd, dest.rev(), state
def clearrebased(ui, repo, state, skipped, collapsedas=None):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is the
    `collapsedas` node.

    With obsolescence markers enabled, the old revisions are marked obsolete
    (pointing at their rebased successor, at `collapsedas`, or at nothing for
    skipped revisions); otherwise they are stripped from the repository.
    """
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        markers = []
        for rev, newrev in sorted(state.items()):
            if newrev >= 0 and newrev != rev:
                if rev in skipped:
                    # skipped revisions get a marker with no successor
                    succs = ()
                elif collapsedas is not None:
                    succs = (repo[collapsedas],)
                else:
                    succs = (repo[newrev],)
                markers.append((repo[rev], succs))
        if markers:
            obsolete.createmarkers(repo, markers, operation='rebase')
    else:
        # no obsolescence support: strip the pre-rebase revisions instead
        rebased = [rev for rev in state
                   if state[rev] > nullmerge and state[rev] != rev]
        if rebased:
            stripped = []
            for root in repo.set('roots(%ld)', rebased):
                if set(repo.changelog.descendants([root.rev()])) - set(state):
                    ui.warn(_("warning: new changesets detected "
                              "on source branch, not stripping\n"))
                else:
                    stripped.append(root.node())
            if stripped:
                # backup the old csets by default
                repair.strip(ui, repo, stripped, "all")
1356 def pullrebase(orig, ui, repo, *args, **opts):
1357 def pullrebase(orig, ui, repo, *args, **opts):
1357 'Call rebase after pull if the latter has been invoked with --rebase'
1358 'Call rebase after pull if the latter has been invoked with --rebase'
1358 ret = None
1359 ret = None
1359 if opts.get('rebase'):
1360 if opts.get('rebase'):
1360 if ui.configbool('commands', 'rebase.requiredest'):
1361 if ui.configbool('commands', 'rebase.requiredest'):
1361 msg = _('rebase destination required by configuration')
1362 msg = _('rebase destination required by configuration')
1362 hint = _('use hg pull followed by hg rebase -d DEST')
1363 hint = _('use hg pull followed by hg rebase -d DEST')
1363 raise error.Abort(msg, hint=hint)
1364 raise error.Abort(msg, hint=hint)
1364
1365
1365 with repo.wlock(), repo.lock():
1366 with repo.wlock(), repo.lock():
1366 if opts.get('update'):
1367 if opts.get('update'):
1367 del opts['update']
1368 del opts['update']
1368 ui.debug('--update and --rebase are not compatible, ignoring '
1369 ui.debug('--update and --rebase are not compatible, ignoring '
1369 'the update flag\n')
1370 'the update flag\n')
1370
1371
1371 cmdutil.checkunfinished(repo)
1372 cmdutil.checkunfinished(repo)
1372 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1373 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1373 'please commit or shelve your changes first'))
1374 'please commit or shelve your changes first'))
1374
1375
1375 revsprepull = len(repo)
1376 revsprepull = len(repo)
1376 origpostincoming = commands.postincoming
1377 origpostincoming = commands.postincoming
1377 def _dummy(*args, **kwargs):
1378 def _dummy(*args, **kwargs):
1378 pass
1379 pass
1379 commands.postincoming = _dummy
1380 commands.postincoming = _dummy
1380 try:
1381 try:
1381 ret = orig(ui, repo, *args, **opts)
1382 ret = orig(ui, repo, *args, **opts)
1382 finally:
1383 finally:
1383 commands.postincoming = origpostincoming
1384 commands.postincoming = origpostincoming
1384 revspostpull = len(repo)
1385 revspostpull = len(repo)
1385 if revspostpull > revsprepull:
1386 if revspostpull > revsprepull:
1386 # --rev option from pull conflict with rebase own --rev
1387 # --rev option from pull conflict with rebase own --rev
1387 # dropping it
1388 # dropping it
1388 if 'rev' in opts:
1389 if 'rev' in opts:
1389 del opts['rev']
1390 del opts['rev']
1390 # positional argument from pull conflicts with rebase's own
1391 # positional argument from pull conflicts with rebase's own
1391 # --source.
1392 # --source.
1392 if 'source' in opts:
1393 if 'source' in opts:
1393 del opts['source']
1394 del opts['source']
1394 # revsprepull is the len of the repo, not revnum of tip.
1395 # revsprepull is the len of the repo, not revnum of tip.
1395 destspace = list(repo.changelog.revs(start=revsprepull))
1396 destspace = list(repo.changelog.revs(start=revsprepull))
1396 opts['_destspace'] = destspace
1397 opts['_destspace'] = destspace
1397 try:
1398 try:
1398 rebase(ui, repo, **opts)
1399 rebase(ui, repo, **opts)
1399 except error.NoMergeDestAbort:
1400 except error.NoMergeDestAbort:
1400 # we can maybe update instead
1401 # we can maybe update instead
1401 rev, _a, _b = destutil.destupdate(repo)
1402 rev, _a, _b = destutil.destupdate(repo)
1402 if rev == repo['.'].rev():
1403 if rev == repo['.'].rev():
1403 ui.status(_('nothing to rebase\n'))
1404 ui.status(_('nothing to rebase\n'))
1404 else:
1405 else:
1405 ui.status(_('nothing to rebase - updating instead\n'))
1406 ui.status(_('nothing to rebase - updating instead\n'))
1406 # not passing argument to get the bare update behavior
1407 # not passing argument to get the bare update behavior
1407 # with warning and trumpets
1408 # with warning and trumpets
1408 commands.update(ui, repo)
1409 commands.update(ui, repo)
1409 else:
1410 else:
1410 if opts.get('tool'):
1411 if opts.get('tool'):
1411 raise error.Abort(_('--tool can only be used with --rebase'))
1412 raise error.Abort(_('--tool can only be used with --rebase'))
1412 ret = orig(ui, repo, *args, **opts)
1413 ret = orig(ui, repo, *args, **opts)
1413
1414
1414 return ret
1415 return ret
1415
1416
1416 def _setrebasesetvisibility(repo, revs):
1417 def _setrebasesetvisibility(repo, revs):
1417 """store the currently rebased set on the repo object
1418 """store the currently rebased set on the repo object
1418
1419
1419 This is used by another function to prevent rebased revision to because
1420 This is used by another function to prevent rebased revision to because
1420 hidden (see issue4504)"""
1421 hidden (see issue4504)"""
1421 repo = repo.unfiltered()
1422 repo = repo.unfiltered()
1422 repo._rebaseset = revs
1423 repo._rebaseset = revs
1423 # invalidate cache if visibility changes
1424 # invalidate cache if visibility changes
1424 hiddens = repo.filteredrevcache.get('visible', set())
1425 hiddens = repo.filteredrevcache.get('visible', set())
1425 if revs & hiddens:
1426 if revs & hiddens:
1426 repo.invalidatevolatilesets()
1427 repo.invalidatevolatilesets()
1427
1428
1428 def _clearrebasesetvisibiliy(repo):
1429 def _clearrebasesetvisibiliy(repo):
1429 """remove rebaseset data from the repo"""
1430 """remove rebaseset data from the repo"""
1430 repo = repo.unfiltered()
1431 repo = repo.unfiltered()
1431 if '_rebaseset' in vars(repo):
1432 if '_rebaseset' in vars(repo):
1432 del repo._rebaseset
1433 del repo._rebaseset
1433
1434
1434 def _rebasedvisible(orig, repo):
1435 def _rebasedvisible(orig, repo):
1435 """ensure rebased revs stay visible (see issue4504)"""
1436 """ensure rebased revs stay visible (see issue4504)"""
1436 blockers = orig(repo)
1437 blockers = orig(repo)
1437 blockers.update(getattr(repo, '_rebaseset', ()))
1438 blockers.update(getattr(repo, '_rebaseset', ()))
1438 return blockers
1439 return blockers
1439
1440
1440 def _filterobsoleterevs(repo, revs):
1441 def _filterobsoleterevs(repo, revs):
1441 """returns a set of the obsolete revisions in revs"""
1442 """returns a set of the obsolete revisions in revs"""
1442 return set(r for r in revs if repo[r].obsolete())
1443 return set(r for r in revs if repo[r].obsolete())
1443
1444
1444 def _computeobsoletenotrebased(repo, rebaseobsrevs, dest):
1445 def _computeobsoletenotrebased(repo, rebaseobsrevs, dest):
1445 """return a mapping obsolete => successor for all obsolete nodes to be
1446 """return a mapping obsolete => successor for all obsolete nodes to be
1446 rebased that have a successors in the destination
1447 rebased that have a successors in the destination
1447
1448
1448 obsolete => None entries in the mapping indicate nodes with no successor"""
1449 obsolete => None entries in the mapping indicate nodes with no successor"""
1449 obsoletenotrebased = {}
1450 obsoletenotrebased = {}
1450
1451
1451 # Build a mapping successor => obsolete nodes for the obsolete
1452 # Build a mapping successor => obsolete nodes for the obsolete
1452 # nodes to be rebased
1453 # nodes to be rebased
1453 allsuccessors = {}
1454 allsuccessors = {}
1454 cl = repo.changelog
1455 cl = repo.changelog
1455 for r in rebaseobsrevs:
1456 for r in rebaseobsrevs:
1456 node = cl.node(r)
1457 node = cl.node(r)
1457 for s in obsolete.allsuccessors(repo.obsstore, [node]):
1458 for s in obsutil.allsuccessors(repo.obsstore, [node]):
1458 try:
1459 try:
1459 allsuccessors[cl.rev(s)] = cl.rev(node)
1460 allsuccessors[cl.rev(s)] = cl.rev(node)
1460 except LookupError:
1461 except LookupError:
1461 pass
1462 pass
1462
1463
1463 if allsuccessors:
1464 if allsuccessors:
1464 # Look for successors of obsolete nodes to be rebased among
1465 # Look for successors of obsolete nodes to be rebased among
1465 # the ancestors of dest
1466 # the ancestors of dest
1466 ancs = cl.ancestors([repo[dest].rev()],
1467 ancs = cl.ancestors([repo[dest].rev()],
1467 stoprev=min(allsuccessors),
1468 stoprev=min(allsuccessors),
1468 inclusive=True)
1469 inclusive=True)
1469 for s in allsuccessors:
1470 for s in allsuccessors:
1470 if s in ancs:
1471 if s in ancs:
1471 obsoletenotrebased[allsuccessors[s]] = s
1472 obsoletenotrebased[allsuccessors[s]] = s
1472 elif (s == allsuccessors[s] and
1473 elif (s == allsuccessors[s] and
1473 allsuccessors.values().count(s) == 1):
1474 allsuccessors.values().count(s) == 1):
1474 # plain prune
1475 # plain prune
1475 obsoletenotrebased[s] = None
1476 obsoletenotrebased[s] = None
1476
1477
1477 return obsoletenotrebased
1478 return obsoletenotrebased
1478
1479
1479 def summaryhook(ui, repo):
1480 def summaryhook(ui, repo):
1480 if not repo.vfs.exists('rebasestate'):
1481 if not repo.vfs.exists('rebasestate'):
1481 return
1482 return
1482 try:
1483 try:
1483 rbsrt = rebaseruntime(repo, ui, {})
1484 rbsrt = rebaseruntime(repo, ui, {})
1484 rbsrt.restorestatus()
1485 rbsrt.restorestatus()
1485 state = rbsrt.state
1486 state = rbsrt.state
1486 except error.RepoLookupError:
1487 except error.RepoLookupError:
1487 # i18n: column positioning for "hg summary"
1488 # i18n: column positioning for "hg summary"
1488 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1489 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1489 ui.write(msg)
1490 ui.write(msg)
1490 return
1491 return
1491 numrebased = len([i for i in state.itervalues() if i >= 0])
1492 numrebased = len([i for i in state.itervalues() if i >= 0])
1492 # i18n: column positioning for "hg summary"
1493 # i18n: column positioning for "hg summary"
1493 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1494 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1494 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1495 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1495 ui.label(_('%d remaining'), 'rebase.remaining') %
1496 ui.label(_('%d remaining'), 'rebase.remaining') %
1496 (len(state) - numrebased)))
1497 (len(state) - numrebased)))
1497
1498
1498 def uisetup(ui):
1499 def uisetup(ui):
1499 #Replace pull with a decorator to provide --rebase option
1500 #Replace pull with a decorator to provide --rebase option
1500 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1501 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1501 entry[1].append(('', 'rebase', None,
1502 entry[1].append(('', 'rebase', None,
1502 _("rebase working directory to branch head")))
1503 _("rebase working directory to branch head")))
1503 entry[1].append(('t', 'tool', '',
1504 entry[1].append(('t', 'tool', '',
1504 _("specify merge tool for rebase")))
1505 _("specify merge tool for rebase")))
1505 cmdutil.summaryhooks.add('rebase', summaryhook)
1506 cmdutil.summaryhooks.add('rebase', summaryhook)
1506 cmdutil.unfinishedstates.append(
1507 cmdutil.unfinishedstates.append(
1507 ['rebasestate', False, False, _('rebase in progress'),
1508 ['rebasestate', False, False, _('rebase in progress'),
1508 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1509 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1509 cmdutil.afterresolvedstates.append(
1510 cmdutil.afterresolvedstates.append(
1510 ['rebasestate', _('hg rebase --continue')])
1511 ['rebasestate', _('hg rebase --continue')])
1511 # ensure rebased rev are not hidden
1512 # ensure rebased rev are not hidden
1512 extensions.wrapfunction(repoview, 'pinnedrevs', _rebasedvisible)
1513 extensions.wrapfunction(repoview, 'pinnedrevs', _rebasedvisible)
@@ -1,1131 +1,1115 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete marker handling
9 """Obsolete marker handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewrite operations, and help
17 transformations performed by history rewrite operations, and help
18 building new tools to reconcile conflicting rewrite actions. To
18 building new tools to reconcile conflicting rewrite actions. To
19 facilitate conflict resolution, markers include various annotations
19 facilitate conflict resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called a "precursor" and possible
23 The old obsoleted changeset is called a "precursor" and possible
24 replacements are called "successors". Markers that used changeset X as
24 replacements are called "successors". Markers that used changeset X as
25 a precursor are called "successor markers of X" because they hold
25 a precursor are called "successor markers of X" because they hold
26 information about the successors of X. Markers that use changeset Y as
26 information about the successors of X. Markers that use changeset Y as
27 a successors are call "precursor markers of Y" because they hold
27 a successors are call "precursor markers of Y" because they hold
28 information about the precursors of Y.
28 information about the precursors of Y.
29
29
30 Examples:
30 Examples:
31
31
32 - When changeset A is replaced by changeset A', one marker is stored:
32 - When changeset A is replaced by changeset A', one marker is stored:
33
33
34 (A, (A',))
34 (A, (A',))
35
35
36 - When changesets A and B are folded into a new changeset C, two markers are
36 - When changesets A and B are folded into a new changeset C, two markers are
37 stored:
37 stored:
38
38
39 (A, (C,)) and (B, (C,))
39 (A, (C,)) and (B, (C,))
40
40
41 - When changeset A is simply "pruned" from the graph, a marker is created:
41 - When changeset A is simply "pruned" from the graph, a marker is created:
42
42
43 (A, ())
43 (A, ())
44
44
45 - When changeset A is split into B and C, a single marker is used:
45 - When changeset A is split into B and C, a single marker is used:
46
46
47 (A, (B, C))
47 (A, (B, C))
48
48
49 We use a single marker to distinguish the "split" case from the "divergence"
49 We use a single marker to distinguish the "split" case from the "divergence"
50 case. If two independent operations rewrite the same changeset A in to A' and
50 case. If two independent operations rewrite the same changeset A in to A' and
51 A'', we have an error case: divergent rewriting. We can detect it because
51 A'', we have an error case: divergent rewriting. We can detect it because
52 two markers will be created independently:
52 two markers will be created independently:
53
53
54 (A, (B,)) and (A, (C,))
54 (A, (B,)) and (A, (C,))
55
55
56 Format
56 Format
57 ------
57 ------
58
58
59 Markers are stored in an append-only file stored in
59 Markers are stored in an append-only file stored in
60 '.hg/store/obsstore'.
60 '.hg/store/obsstore'.
61
61
62 The file starts with a version header:
62 The file starts with a version header:
63
63
64 - 1 unsigned byte: version number, starting at zero.
64 - 1 unsigned byte: version number, starting at zero.
65
65
66 The header is followed by the markers. Marker format depend of the version. See
66 The header is followed by the markers. Marker format depend of the version. See
67 comment associated with each format for details.
67 comment associated with each format for details.
68
68
69 """
69 """
70 from __future__ import absolute_import
70 from __future__ import absolute_import
71
71
72 import errno
72 import errno
73 import struct
73 import struct
74
74
75 from .i18n import _
75 from .i18n import _
76 from . import (
76 from . import (
77 error,
77 error,
78 node,
78 node,
79 obsutil,
79 obsutil,
80 phases,
80 phases,
81 policy,
81 policy,
82 util,
82 util,
83 )
83 )
84
84
85 parsers = policy.importmod(r'parsers')
85 parsers = policy.importmod(r'parsers')
86
86
87 _pack = struct.pack
87 _pack = struct.pack
88 _unpack = struct.unpack
88 _unpack = struct.unpack
89 _calcsize = struct.calcsize
89 _calcsize = struct.calcsize
90 propertycache = util.propertycache
90 propertycache = util.propertycache
91
91
92 # the obsolete feature is not mature enough to be enabled by default.
92 # the obsolete feature is not mature enough to be enabled by default.
93 # you have to rely on third party extension extension to enable this.
93 # you have to rely on third party extension extension to enable this.
94 _enabled = False
94 _enabled = False
95
95
96 # Options for obsolescence
96 # Options for obsolescence
97 createmarkersopt = 'createmarkers'
97 createmarkersopt = 'createmarkers'
98 allowunstableopt = 'allowunstable'
98 allowunstableopt = 'allowunstable'
99 exchangeopt = 'exchange'
99 exchangeopt = 'exchange'
100
100
101 def isenabled(repo, option):
101 def isenabled(repo, option):
102 """Returns True if the given repository has the given obsolete option
102 """Returns True if the given repository has the given obsolete option
103 enabled.
103 enabled.
104 """
104 """
105 result = set(repo.ui.configlist('experimental', 'evolution'))
105 result = set(repo.ui.configlist('experimental', 'evolution'))
106 if 'all' in result:
106 if 'all' in result:
107 return True
107 return True
108
108
109 # For migration purposes, temporarily return true if the config hasn't been
109 # For migration purposes, temporarily return true if the config hasn't been
110 # set but _enabled is true.
110 # set but _enabled is true.
111 if len(result) == 0 and _enabled:
111 if len(result) == 0 and _enabled:
112 return True
112 return True
113
113
114 # createmarkers must be enabled if other options are enabled
114 # createmarkers must be enabled if other options are enabled
115 if ((allowunstableopt in result or exchangeopt in result) and
115 if ((allowunstableopt in result or exchangeopt in result) and
116 not createmarkersopt in result):
116 not createmarkersopt in result):
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 "if other obsolete options are enabled"))
118 "if other obsolete options are enabled"))
119
119
120 return option in result
120 return option in result
121
121
122 ### obsolescence marker flag
122 ### obsolescence marker flag
123
123
124 ## bumpedfix flag
124 ## bumpedfix flag
125 #
125 #
126 # When a changeset A' succeed to a changeset A which became public, we call A'
126 # When a changeset A' succeed to a changeset A which became public, we call A'
127 # "bumped" because it's a successors of a public changesets
127 # "bumped" because it's a successors of a public changesets
128 #
128 #
129 # o A' (bumped)
129 # o A' (bumped)
130 # |`:
130 # |`:
131 # | o A
131 # | o A
132 # |/
132 # |/
133 # o Z
133 # o Z
134 #
134 #
135 # The way to solve this situation is to create a new changeset Ad as children
135 # The way to solve this situation is to create a new changeset Ad as children
136 # of A. This changeset have the same content than A'. So the diff from A to A'
136 # of A. This changeset have the same content than A'. So the diff from A to A'
137 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
137 # is the same than the diff from A to Ad. Ad is marked as a successors of A'
138 #
138 #
139 # o Ad
139 # o Ad
140 # |`:
140 # |`:
141 # | x A'
141 # | x A'
142 # |'|
142 # |'|
143 # o | A
143 # o | A
144 # |/
144 # |/
145 # o Z
145 # o Z
146 #
146 #
147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
147 # But by transitivity Ad is also a successors of A. To avoid having Ad marked
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
148 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
149 # This flag mean that the successors express the changes between the public and
149 # This flag mean that the successors express the changes between the public and
150 # bumped version and fix the situation, breaking the transitivity of
150 # bumped version and fix the situation, breaking the transitivity of
151 # "bumped" here.
151 # "bumped" here.
152 bumpedfix = 1
152 bumpedfix = 1
153 usingsha256 = 2
153 usingsha256 = 2
154
154
155 ## Parsing and writing of version "0"
155 ## Parsing and writing of version "0"
156 #
156 #
157 # The header is followed by the markers. Each marker is made of:
157 # The header is followed by the markers. Each marker is made of:
158 #
158 #
159 # - 1 uint8 : number of new changesets "N", can be zero.
159 # - 1 uint8 : number of new changesets "N", can be zero.
160 #
160 #
161 # - 1 uint32: metadata size "M" in bytes.
161 # - 1 uint32: metadata size "M" in bytes.
162 #
162 #
163 # - 1 byte: a bit field. It is reserved for flags used in common
163 # - 1 byte: a bit field. It is reserved for flags used in common
164 # obsolete marker operations, to avoid repeated decoding of metadata
164 # obsolete marker operations, to avoid repeated decoding of metadata
165 # entries.
165 # entries.
166 #
166 #
167 # - 20 bytes: obsoleted changeset identifier.
167 # - 20 bytes: obsoleted changeset identifier.
168 #
168 #
169 # - N*20 bytes: new changesets identifiers.
169 # - N*20 bytes: new changesets identifiers.
170 #
170 #
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 # string contains a key and a value, separated by a colon ':', without
172 # string contains a key and a value, separated by a colon ':', without
173 # additional encoding. Keys cannot contain '\0' or ':' and values
173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 # cannot contain '\0'.
174 # cannot contain '\0'.
175 _fm0version = 0
175 _fm0version = 0
176 _fm0fixed = '>BIB20s'
176 _fm0fixed = '>BIB20s'
177 _fm0node = '20s'
177 _fm0node = '20s'
178 _fm0fsize = _calcsize(_fm0fixed)
178 _fm0fsize = _calcsize(_fm0fixed)
179 _fm0fnodesize = _calcsize(_fm0node)
179 _fm0fnodesize = _calcsize(_fm0node)
180
180
181 def _fm0readmarkers(data, off):
181 def _fm0readmarkers(data, off):
182 # Loop on markers
182 # Loop on markers
183 l = len(data)
183 l = len(data)
184 while off + _fm0fsize <= l:
184 while off + _fm0fsize <= l:
185 # read fixed part
185 # read fixed part
186 cur = data[off:off + _fm0fsize]
186 cur = data[off:off + _fm0fsize]
187 off += _fm0fsize
187 off += _fm0fsize
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
189 # read replacement
189 # read replacement
190 sucs = ()
190 sucs = ()
191 if numsuc:
191 if numsuc:
192 s = (_fm0fnodesize * numsuc)
192 s = (_fm0fnodesize * numsuc)
193 cur = data[off:off + s]
193 cur = data[off:off + s]
194 sucs = _unpack(_fm0node * numsuc, cur)
194 sucs = _unpack(_fm0node * numsuc, cur)
195 off += s
195 off += s
196 # read metadata
196 # read metadata
197 # (metadata will be decoded on demand)
197 # (metadata will be decoded on demand)
198 metadata = data[off:off + mdsize]
198 metadata = data[off:off + mdsize]
199 if len(metadata) != mdsize:
199 if len(metadata) != mdsize:
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 raise error.Abort(_('parsing obsolete marker: metadata is too '
201 'short, %d bytes expected, got %d')
201 'short, %d bytes expected, got %d')
202 % (mdsize, len(metadata)))
202 % (mdsize, len(metadata)))
203 off += mdsize
203 off += mdsize
204 metadata = _fm0decodemeta(metadata)
204 metadata = _fm0decodemeta(metadata)
205 try:
205 try:
206 when, offset = metadata.pop('date', '0 0').split(' ')
206 when, offset = metadata.pop('date', '0 0').split(' ')
207 date = float(when), int(offset)
207 date = float(when), int(offset)
208 except ValueError:
208 except ValueError:
209 date = (0., 0)
209 date = (0., 0)
210 parents = None
210 parents = None
211 if 'p2' in metadata:
211 if 'p2' in metadata:
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
213 elif 'p1' in metadata:
213 elif 'p1' in metadata:
214 parents = (metadata.pop('p1', None),)
214 parents = (metadata.pop('p1', None),)
215 elif 'p0' in metadata:
215 elif 'p0' in metadata:
216 parents = ()
216 parents = ()
217 if parents is not None:
217 if parents is not None:
218 try:
218 try:
219 parents = tuple(node.bin(p) for p in parents)
219 parents = tuple(node.bin(p) for p in parents)
220 # if parent content is not a nodeid, drop the data
220 # if parent content is not a nodeid, drop the data
221 for p in parents:
221 for p in parents:
222 if len(p) != 20:
222 if len(p) != 20:
223 parents = None
223 parents = None
224 break
224 break
225 except TypeError:
225 except TypeError:
226 # if content cannot be translated to nodeid drop the data.
226 # if content cannot be translated to nodeid drop the data.
227 parents = None
227 parents = None
228
228
229 metadata = tuple(sorted(metadata.iteritems()))
229 metadata = tuple(sorted(metadata.iteritems()))
230
230
231 yield (pre, sucs, flags, metadata, date, parents)
231 yield (pre, sucs, flags, metadata, date, parents)
232
232
233 def _fm0encodeonemarker(marker):
233 def _fm0encodeonemarker(marker):
234 pre, sucs, flags, metadata, date, parents = marker
234 pre, sucs, flags, metadata, date, parents = marker
235 if flags & usingsha256:
235 if flags & usingsha256:
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
237 metadata = dict(metadata)
237 metadata = dict(metadata)
238 time, tz = date
238 time, tz = date
239 metadata['date'] = '%r %i' % (time, tz)
239 metadata['date'] = '%r %i' % (time, tz)
240 if parents is not None:
240 if parents is not None:
241 if not parents:
241 if not parents:
242 # mark that we explicitly recorded no parents
242 # mark that we explicitly recorded no parents
243 metadata['p0'] = ''
243 metadata['p0'] = ''
244 for i, p in enumerate(parents, 1):
244 for i, p in enumerate(parents, 1):
245 metadata['p%i' % i] = node.hex(p)
245 metadata['p%i' % i] = node.hex(p)
246 metadata = _fm0encodemeta(metadata)
246 metadata = _fm0encodemeta(metadata)
247 numsuc = len(sucs)
247 numsuc = len(sucs)
248 format = _fm0fixed + (_fm0node * numsuc)
248 format = _fm0fixed + (_fm0node * numsuc)
249 data = [numsuc, len(metadata), flags, pre]
249 data = [numsuc, len(metadata), flags, pre]
250 data.extend(sucs)
250 data.extend(sucs)
251 return _pack(format, *data) + metadata
251 return _pack(format, *data) + metadata
252
252
253 def _fm0encodemeta(meta):
253 def _fm0encodemeta(meta):
254 """Return encoded metadata string to string mapping.
254 """Return encoded metadata string to string mapping.
255
255
256 Assume no ':' in key and no '\0' in both key and value."""
256 Assume no ':' in key and no '\0' in both key and value."""
257 for key, value in meta.iteritems():
257 for key, value in meta.iteritems():
258 if ':' in key or '\0' in key:
258 if ':' in key or '\0' in key:
259 raise ValueError("':' and '\0' are forbidden in metadata key'")
259 raise ValueError("':' and '\0' are forbidden in metadata key'")
260 if '\0' in value:
260 if '\0' in value:
261 raise ValueError("':' is forbidden in metadata value'")
261 raise ValueError("':' is forbidden in metadata value'")
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
263
263
264 def _fm0decodemeta(data):
264 def _fm0decodemeta(data):
265 """Return string to string dictionary from encoded version."""
265 """Return string to string dictionary from encoded version."""
266 d = {}
266 d = {}
267 for l in data.split('\0'):
267 for l in data.split('\0'):
268 if l:
268 if l:
269 key, value = l.split(':')
269 key, value = l.split(':')
270 d[key] = value
270 d[key] = value
271 return d
271 return d
272
272
273 ## Parsing and writing of version "1"
273 ## Parsing and writing of version "1"
274 #
274 #
275 # The header is followed by the markers. Each marker is made of:
275 # The header is followed by the markers. Each marker is made of:
276 #
276 #
277 # - uint32: total size of the marker (including this field)
277 # - uint32: total size of the marker (including this field)
278 #
278 #
279 # - float64: date in seconds since epoch
279 # - float64: date in seconds since epoch
280 #
280 #
281 # - int16: timezone offset in minutes
281 # - int16: timezone offset in minutes
282 #
282 #
283 # - uint16: a bit field. It is reserved for flags used in common
283 # - uint16: a bit field. It is reserved for flags used in common
284 # obsolete marker operations, to avoid repeated decoding of metadata
284 # obsolete marker operations, to avoid repeated decoding of metadata
285 # entries.
285 # entries.
286 #
286 #
287 # - uint8: number of successors "N", can be zero.
287 # - uint8: number of successors "N", can be zero.
288 #
288 #
289 # - uint8: number of parents "P", can be zero.
289 # - uint8: number of parents "P", can be zero.
290 #
290 #
291 # 0: parents data stored but no parent,
291 # 0: parents data stored but no parent,
292 # 1: one parent stored,
292 # 1: one parent stored,
293 # 2: two parents stored,
293 # 2: two parents stored,
294 # 3: no parent data stored
294 # 3: no parent data stored
295 #
295 #
296 # - uint8: number of metadata entries M
296 # - uint8: number of metadata entries M
297 #
297 #
298 # - 20 or 32 bytes: precursor changeset identifier.
298 # - 20 or 32 bytes: precursor changeset identifier.
299 #
299 #
300 # - N*(20 or 32) bytes: successors changesets identifiers.
300 # - N*(20 or 32) bytes: successors changesets identifiers.
301 #
301 #
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
302 # - P*(20 or 32) bytes: parents of the precursors changesets.
303 #
303 #
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 # - M*(uint8, uint8): size of all metadata entries (key and value)
305 #
305 #
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 # - remaining bytes: the metadata, each (key, value) pair after the other.
307 _fm1version = 1
307 _fm1version = 1
308 _fm1fixed = '>IdhHBBB20s'
308 _fm1fixed = '>IdhHBBB20s'
309 _fm1nodesha1 = '20s'
309 _fm1nodesha1 = '20s'
310 _fm1nodesha256 = '32s'
310 _fm1nodesha256 = '32s'
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 _fm1nodesha1size = _calcsize(_fm1nodesha1)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 _fm1nodesha256size = _calcsize(_fm1nodesha256)
313 _fm1fsize = _calcsize(_fm1fixed)
313 _fm1fsize = _calcsize(_fm1fixed)
314 _fm1parentnone = 3
314 _fm1parentnone = 3
315 _fm1parentshift = 14
315 _fm1parentshift = 14
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
317 _fm1metapair = 'BB'
317 _fm1metapair = 'BB'
318 _fm1metapairsize = _calcsize('BB')
318 _fm1metapairsize = _calcsize('BB')
319
319
def _fm1purereadmarkers(data, off):
    """Pure-python decoder for version-1 binary markers.

    Yields one ``(prec, sucs, flags, metadata, (secs, tzoffset), parents)``
    tuple per marker found in ``data``, starting at byte offset ``off``.
    Used as a fallback when the C parser is unavailable.
    """
    # make some global constants local for performance
    noneflag = _fm1parentnone
    sha2flag = usingsha256
    sha1size = _fm1nodesha1size
    sha2size = _fm1nodesha256size
    sha1fmt = _fm1nodesha1
    sha2fmt = _fm1nodesha256
    metasize = _fm1metapairsize
    metafmt = _fm1metapair
    fsize = _fm1fsize
    unpack = _unpack

    # Loop on markers
    stop = len(data) - _fm1fsize
    ufixed = struct.Struct(_fm1fixed).unpack

    while off <= stop:
        # read fixed part (t is the total marker size, unused here)
        o1 = off + fsize
        t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])

        if flags & sha2flag:
            # FIXME: prec was read as a SHA1, needs to be amended

            # read 0 or more successors
            if numsuc == 1:
                # fast path: avoid a struct call for the common single node
                o2 = o1 + sha2size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha2size * numsuc
                sucs = unpack(sha2fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                # parents were not recorded for this marker
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha2size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha2size * numpar
                parents = unpack(sha2fmt * numpar, data[o2:o3])
        else:
            # read 0 or more successors
            if numsuc == 1:
                o2 = o1 + sha1size
                sucs = (data[o1:o2],)
            else:
                o2 = o1 + sha1size * numsuc
                sucs = unpack(sha1fmt * numsuc, data[o1:o2])

            # read parents
            if numpar == noneflag:
                o3 = o2
                parents = None
            elif numpar == 1:
                o3 = o2 + sha1size
                parents = (data[o2:o3],)
            else:
                o3 = o2 + sha1size * numpar
                parents = unpack(sha1fmt * numpar, data[o2:o3])

        # read metadata: first the (key length, value length) pairs, then the
        # key/value bytes themselves
        off = o3 + metasize * nummeta
        metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
        metadata = []
        for idx in xrange(0, len(metapairsize), 2):
            o1 = off + metapairsize[idx]
            o2 = o1 + metapairsize[idx + 1]
            metadata.append((data[off:o1], data[o1:o2]))
            off = o2

        # tz is stored in minutes on disk; callers expect seconds
        yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
394
394
def _fm1encodeonemarker(marker):
    """Encode a single marker tuple into its version-1 binary form."""
    pre, sucs, flags, metadata, date, parents = marker
    # node width depends on which hash flavor the marker uses
    nodefmt = _fm1nodesha1
    if flags & usingsha256:
        nodefmt = _fm1nodesha256
    numsuc = len(sucs)
    extranodes = numsuc
    if parents is None:
        numpar = _fm1parentnone
    else:
        numpar = len(parents)
        extranodes += numpar
    fmt = _fm1fixed + nodefmt * extranodes + _fm1metapair * len(metadata)
    # tz is stored in minutes so we divide by 60
    tzoff = date[1] // 60
    fields = [None, date[0], tzoff, flags, numsuc, numpar, len(metadata), pre]
    fields.extend(sucs)
    if parents is not None:
        fields.extend(parents)
    # total size = fixed struct + the trailing raw metadata bytes
    totalsize = _calcsize(fmt)
    for key, value in metadata:
        klen = len(key)
        vlen = len(value)
        fields.append(klen)
        fields.append(vlen)
        totalsize += klen + vlen
    fields[0] = totalsize
    chunks = [_pack(fmt, *fields)]
    for key, value in metadata:
        chunks.append(key)
        chunks.append(value)
    return ''.join(chunks)
430
430
def _fm1readmarkers(data, off):
    """Decode version-1 markers, preferring the C parser when available."""
    native = getattr(parsers, 'fm1readmarkers', None)
    if native:
        return native(data, off, len(data) - _fm1fsize)
    # fall back to the (slower) pure-python implementation
    return _fm1purereadmarkers(data, off)
437
437
# mapping to read/write various marker formats
# <version> -> (decoder, encoder)
formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
           _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
442
442
def _readmarkerversion(data):
    """Return the format version stored in the first byte of *data*."""
    (version,) = _unpack('>B', data[0:1])
    return version
445
445
@util.nogc
def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    version = _readmarkerversion(data)
    if version not in formats:
        msg = _('parsing obsolete marker: unknown version %r') % version
        raise error.UnknownVersion(msg, version=version)
    # markers start right after the one-byte version header
    return version, formats[version][0](data, 1)
455
455
def encodeheader(version=_fm0version):
    """Return the one-byte obsstore header encoding *version*."""
    return _pack('>B', version)
458
458
def encodemarkers(markers, addheader=False, version=_fm0version):
    """Generate the binary encoding of *markers*, optionally with a header.

    Kept separate from flushmarkers(), it will be reused for markers
    exchange.
    """
    # resolve the encoder first so an unknown version fails before any output
    encode = formats[version][1]
    if addheader:
        yield encodeheader(version)
    for m in markers:
        yield encode(m)
467
467
468
468
class marker(object):
    """Read-only wrapper around a raw obsolete-marker tuple."""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def __hash__(self):
        return hash(self._data)

    def __eq__(self, other):
        # only markers of the exact same class compare equal
        if type(self) is not type(other):
            return False
        return self._data == other._data

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def parentnodes(self):
        """Parents of the precursors (None if not recorded)"""
        return self._data[5]

    def metadata(self):
        """Decoded metadata dictionary"""
        return dict(self._data[3])

    def date(self):
        """Creation date as (unixtime, offset)"""
        return self._data[4]

    def flags(self):
        """The flags field of the marker"""
        return self._data[2]
509
509
@util.nogc
def _addsuccessors(successors, markers):
    # index each marker under its precursor node
    for mark in markers:
        key = mark[0]
        if key not in successors:
            successors[key] = set()
        successors[key].add(mark)
514
514
@util.nogc
def _addprecursors(precursors, markers):
    # index each marker under every one of its successor nodes
    for mark in markers:
        for successor in mark[1]:
            bucket = precursors.setdefault(successor, set())
            bucket.add(mark)
520
520
@util.nogc
def _addchildren(children, markers):
    # index each marker under the recorded parents of its precursor
    for mark in markers:
        parents = mark[5]
        if parents is None:
            continue
        for p in parents:
            children.setdefault(p, set()).add(mark)
528
528
def _checkinvalidmarkers(markers):
    """search for marker with invalid data and raise error if needed

    Exist as a separated function to allow the evolve extension for a more
    subtle handling.
    """
    # a nullid successor would mark the null revision as obsolete
    if any(node.nullid in mark[1] for mark in markers):
        raise error.Abort(_('bad obsolescence marker detected: '
                            'invalid successors nullid'))
539
539
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with three mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    - children[x] -> set(markers on precursors edges of children(x)
    """

    fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
    # prec: nodeid, precursor changesets
    # succs: tuple of nodeid, successor changesets (0-N length)
    # flag: integer, flag field carrying modifier for the markers (see doc)
    # meta: binary blob, encoded metadata dictionary
    # date: (float, int) tuple, date of marker creation
    # parents: (tuple of nodeid) or None, parents of precursors
    # None is used when no data has been recorded

    def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
        # caches for various obsolescence related cache
        self.caches = {}
        self.svfs = svfs
        # on-disk format version to use when creating a brand new obsstore
        self._defaultformat = defaultformat
        self._readonly = readonly

    def __iter__(self):
        return iter(self._all)

    def __len__(self):
        return len(self._all)

    def __nonzero__(self):
        # When markers have not been parsed yet, a cheap stat() is enough:
        # the file is non-empty iff it is longer than the one-byte version
        # header.
        if not self._cached('_all'):
            try:
                return self.svfs.stat('obsstore').st_size > 1
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # just build an empty _all list if no obsstore exists, which
                # avoids further stat() syscalls
                pass
        return bool(self._all)

    __bool__ = __nonzero__  # python 3 spelling

    @property
    def readonly(self):
        """True if marker creation is disabled

        Remove me in the future when obsolete marker is always on."""
        return self._readonly

    def create(self, transaction, prec, succs=(), flag=0, parents=None,
               date=None, metadata=None, ui=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata

        If you are a human writing code creating marker you want to use the
        `createmarkers` function in this module instead.

        return True if a new marker have been added, False if the markers
        already existed (no op).
        """
        if metadata is None:
            metadata = {}
        if date is None:
            if 'date' in metadata:
                # as a courtesy for out-of-tree extensions
                date = util.parsedate(metadata.pop('date'))
            elif ui is not None:
                date = ui.configdate('devel', 'default-date')
                if date is None:
                    date = util.makedate()
            else:
                date = util.makedate()
        # nodes must be 20-byte binary hashes
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        if prec in succs:
            raise ValueError(_('in-marker cycle with %s') % node.hex(prec))

        # sorted tuple keeps the marker hashable and its encoding stable
        metadata = tuple(sorted(metadata.iteritems()))

        marker = (str(prec), tuple(succs), int(flag), metadata, date, parents)
        return bool(self.add(transaction, [marker]))

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicate.
        Return the number of new marker."""
        if self._readonly:
            raise error.Abort(_('creating obsolete markers is not enabled on '
                                'this repo'))
        known = set()
        getsuccessors = self.successors.get
        new = []
        for m in markers:
            # drop markers already stored and duplicates within this batch
            if m not in getsuccessors(m[0], ()) and m not in known:
                known.add(m)
                new.append(m)
        if new:
            f = self.svfs('obsstore', 'ab')
            try:
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in encodemarkers(new, offset == 0, self._version):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._addmarkers(new)
            # new marker *may* have changed several set. invalidate the cache.
            self.caches.clear()
        # records the number of new markers for the transaction hooks
        previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
        transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
        return len(new)

    def mergemarkers(self, transaction, data):
        """merge a binary stream of markers inside the obsstore

        Returns the number of new markers added."""
        version, markers = _readmarkers(data)
        return self.add(transaction, markers)

    @propertycache
    def _data(self):
        # raw bytes of the obsstore file ('' when the file does not exist)
        return self.svfs.tryread('obsstore')

    @propertycache
    def _version(self):
        if len(self._data) >= 1:
            # an existing on-disk store dictates the format
            return _readmarkerversion(self._data)
        else:
            return self._defaultformat

    @propertycache
    def _all(self):
        # list of every marker, parsed lazily from the raw data
        data = self._data
        if not data:
            return []
        self._version, markers = _readmarkers(data)
        markers = list(markers)
        _checkinvalidmarkers(markers)
        return markers

    @propertycache
    def successors(self):
        # precursor node -> set of markers obsoleting it
        successors = {}
        _addsuccessors(successors, self._all)
        return successors

    @propertycache
    def precursors(self):
        # successor node -> set of markers producing it
        precursors = {}
        _addprecursors(precursors, self._all)
        return precursors

    @propertycache
    def children(self):
        # parent node -> set of markers whose precursor has it as parent
        children = {}
        _addchildren(children, self._all)
        return children

    def _cached(self, attr):
        # True when the propertycache ``attr`` has already been computed
        return attr in self.__dict__

    def _addmarkers(self, markers):
        # incrementally update _all and any already-computed index
        markers = list(markers) # to allow repeated iteration
        self._all.extend(markers)
        if self._cached('successors'):
            _addsuccessors(self.successors, markers)
        if self._cached('precursors'):
            _addprecursors(self.precursors, markers)
        if self._cached('children'):
            _addchildren(self.children, markers)
        _checkinvalidmarkers(markers)

    def relevantmarkers(self, nodes):
        """return a set of all obsolescence markers relevant to a set of nodes.

        "relevant" to a set of nodes mean:

        - marker that use this changeset as successor
        - prune marker of direct children on this changeset
        - recursive application of the two rules on precursors of these markers

        It is a set so you cannot rely on order."""

        pendingnodes = set(nodes)
        seenmarkers = set()
        seennodes = set(pendingnodes)
        precursorsmarkers = self.precursors
        succsmarkers = self.successors
        children = self.children
        while pendingnodes:
            direct = set()
            for current in pendingnodes:
                direct.update(precursorsmarkers.get(current, ()))
                # prune markers (no successors) attached to children or to
                # the node itself
                pruned = [m for m in children.get(current, ()) if not m[1]]
                direct.update(pruned)
                pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
                direct.update(pruned)
            # only follow markers and nodes not processed yet
            direct -= seenmarkers
            pendingnodes = set([m[0] for m in direct])
            seenmarkers |= direct
            pendingnodes -= seennodes
            seennodes |= pendingnodes
        return seenmarkers
757
757
def makestore(ui, repo):
    """Create an obsstore instance from a repo."""
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = ui.configint('format', 'obsstore-version', None)
    createargs = {}
    # rely on obsstore class default when possible.
    if defaultformat is not None:
        createargs['defaultformat'] = defaultformat
    readonly = not isenabled(repo, createmarkersopt)
    store = obsstore(repo.svfs, readonly=readonly, **createargs)
    if store and readonly:
        ui.warn(_('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
    return store
773
773
def commonversion(versions):
    """Return the newest version listed in both versions and our local formats.

    Returns None if no common version exists.
    """
    # note: sorts the caller's list in place, highest version first
    versions.sort(reverse=True)
    # search for highest version known on both side
    return next((v for v in versions if v in formats), None)
785
785
# Arbitrarily picked to fit into the 8K limit from the HTTP server,
# taking into account:
# - the version header
# - the base85 encoding overhead
_maxpayload = 5300
791
791
def _pushkeyescape(markers):
    """encode markers into a dict suitable for pushkey exchange

    - binary data is base85 encoded
    - split in chunks smaller than 5300 bytes"""
    parts = []
    currentpart = None
    # start above the limit so the first marker opens a fresh part
    currentlen = _maxpayload * 2
    for marker in markers:
        encoded = _fm0encodeonemarker(marker)
        if len(encoded) + currentlen > _maxpayload:
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(encoded)
        currentlen += len(encoded)
    keys = {}
    for idx, part in enumerate(reversed(parts)):
        # every chunk carries its own version header
        data = ''.join([_pack('>B', _fm0version)] + part)
        keys['dump%i' % idx] = util.b85encode(data)
    return keys
812
812
def listmarkers(repo):
    """List markers over pushkey"""
    store = repo.obsstore
    if not store:
        return {}
    return _pushkeyescape(sorted(store))
818
818
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    # guard clauses: reject malformed pushkey requests up front
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return False
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return False
    data = util.b85decode(new)
    # 'repolock' avoids shadowing the module-level 'lock' import
    repolock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            repo.invalidatevolatilesets()
            tr.close()
            return True
        finally:
            tr.release()
    finally:
        repolock.release()
840
840
def getmarkers(repo, nodes=None, exclusive=False):
    """returns markers known in a repository

    If <nodes> is specified, only markers "relevant" to those nodes are
    returned"""
    if nodes is None:
        raw = repo.obsstore
    elif exclusive:
        raw = obsutil.exclusivemarkers(repo, nodes)
    else:
        raw = repo.obsstore.relevantmarkers(nodes)

    for markerdata in raw:
        yield marker(repo, markerdata)
855
855
def relevantmarkers(repo, node):
    """all obsolete markers relevant to some revision"""
    for raw in repo.obsstore.relevantmarkers(node):
        yield marker(repo, raw)
860
860
861
861
def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successors"""
    repo = ctx.repo()
    for data in repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(repo, data)
866
866
def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    repo = ctx.repo()
    for data in repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(repo, data)
871
871
872 def allsuccessors(obsstore, nodes, ignoreflags=0):
873 """Yield node for every successor of <nodes>.
874
875 Some successors may be unknown locally.
876
877 This is a linear yield unsuited to detecting split changesets. It includes
878 initial nodes too."""
879 remaining = set(nodes)
880 seen = set(remaining)
881 while remaining:
882 current = remaining.pop()
883 yield current
884 for mark in obsstore.successors.get(current, ()):
885 # ignore marker flagged with specified flag
886 if mark[2] & ignoreflags:
887 continue
888 for suc in mark[1]:
889 if suc not in seen:
890 seen.add(suc)
891 remaining.add(suc)
892
893 def foreground(repo, nodes):
872 def foreground(repo, nodes):
894 """return all nodes in the "foreground" of other node
873 """return all nodes in the "foreground" of other node
895
874
896 The foreground of a revision is anything reachable using parent -> children
875 The foreground of a revision is anything reachable using parent -> children
897 or precursor -> successor relation. It is very similar to "descendant" but
876 or precursor -> successor relation. It is very similar to "descendant" but
898 augmented with obsolescence information.
877 augmented with obsolescence information.
899
878
900 Beware that possible obsolescence cycle may result if complex situation.
879 Beware that possible obsolescence cycle may result if complex situation.
901 """
880 """
902 repo = repo.unfiltered()
881 repo = repo.unfiltered()
903 foreground = set(repo.set('%ln::', nodes))
882 foreground = set(repo.set('%ln::', nodes))
904 if repo.obsstore:
883 if repo.obsstore:
905 # We only need this complicated logic if there is obsolescence
884 # We only need this complicated logic if there is obsolescence
906 # XXX will probably deserve an optimised revset.
885 # XXX will probably deserve an optimised revset.
907 nm = repo.changelog.nodemap
886 nm = repo.changelog.nodemap
908 plen = -1
887 plen = -1
909 # compute the whole set of successors or descendants
888 # compute the whole set of successors or descendants
910 while len(foreground) != plen:
889 while len(foreground) != plen:
911 plen = len(foreground)
890 plen = len(foreground)
912 succs = set(c.node() for c in foreground)
891 succs = set(c.node() for c in foreground)
913 mutable = [c.node() for c in foreground if c.mutable()]
892 mutable = [c.node() for c in foreground if c.mutable()]
914 succs.update(allsuccessors(repo.obsstore, mutable))
893 succs.update(obsutil.allsuccessors(repo.obsstore, mutable))
915 known = (n for n in succs if n in nm)
894 known = (n for n in succs if n in nm)
916 foreground = set(repo.set('%ln::', known))
895 foreground = set(repo.set('%ln::', known))
917 return set(c.node() for c in foreground)
896 return set(c.node() for c in foreground)
918
897
919 # keep compatibility for the 4.3 cycle
898 # keep compatibility for the 4.3 cycle
920 def allprecursors(obsstore, nodes, ignoreflags=0):
899 def allprecursors(obsstore, nodes, ignoreflags=0):
921 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
900 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
922 util.nouideprecwarn(movemsg, '4.3')
901 util.nouideprecwarn(movemsg, '4.3')
923 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
902 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
924
903
904 def allsuccessors(obsstore, nodes, ignoreflags=0):
905 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
906 util.nouideprecwarn(movemsg, '4.3')
907 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
908
925 def exclusivemarkers(repo, nodes):
909 def exclusivemarkers(repo, nodes):
926 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
910 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
927 repo.ui.deprecwarn(movemsg, '4.3')
911 repo.ui.deprecwarn(movemsg, '4.3')
928 return obsutil.exclusivemarkers(repo, nodes)
912 return obsutil.exclusivemarkers(repo, nodes)
929
913
930 def successorssets(repo, initialnode, cache=None):
914 def successorssets(repo, initialnode, cache=None):
931 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
915 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
932 repo.ui.deprecwarn(movemsg, '4.3')
916 repo.ui.deprecwarn(movemsg, '4.3')
933 return obsutil.successorssets(repo, initialnode, cache=cache)
917 return obsutil.successorssets(repo, initialnode, cache=cache)
934
918
935 # mapping of 'set-name' -> <function to compute this set>
919 # mapping of 'set-name' -> <function to compute this set>
936 cachefuncs = {}
920 cachefuncs = {}
937 def cachefor(name):
921 def cachefor(name):
938 """Decorator to register a function as computing the cache for a set"""
922 """Decorator to register a function as computing the cache for a set"""
939 def decorator(func):
923 def decorator(func):
940 if name in cachefuncs:
924 if name in cachefuncs:
941 msg = "duplicated registration for volatileset '%s' (existing: %r)"
925 msg = "duplicated registration for volatileset '%s' (existing: %r)"
942 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
926 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
943 cachefuncs[name] = func
927 cachefuncs[name] = func
944 return func
928 return func
945 return decorator
929 return decorator
946
930
947 def getrevs(repo, name):
931 def getrevs(repo, name):
948 """Return the set of revision that belong to the <name> set
932 """Return the set of revision that belong to the <name> set
949
933
950 Such access may compute the set and cache it for future use"""
934 Such access may compute the set and cache it for future use"""
951 repo = repo.unfiltered()
935 repo = repo.unfiltered()
952 if not repo.obsstore:
936 if not repo.obsstore:
953 return frozenset()
937 return frozenset()
954 if name not in repo.obsstore.caches:
938 if name not in repo.obsstore.caches:
955 repo.obsstore.caches[name] = cachefuncs[name](repo)
939 repo.obsstore.caches[name] = cachefuncs[name](repo)
956 return repo.obsstore.caches[name]
940 return repo.obsstore.caches[name]
957
941
958 # To be simple we need to invalidate obsolescence cache when:
942 # To be simple we need to invalidate obsolescence cache when:
959 #
943 #
960 # - new changeset is added:
944 # - new changeset is added:
961 # - public phase is changed
945 # - public phase is changed
962 # - obsolescence marker are added
946 # - obsolescence marker are added
963 # - strip is used a repo
947 # - strip is used a repo
964 def clearobscaches(repo):
948 def clearobscaches(repo):
965 """Remove all obsolescence related cache from a repo
949 """Remove all obsolescence related cache from a repo
966
950
967 This remove all cache in obsstore is the obsstore already exist on the
951 This remove all cache in obsstore is the obsstore already exist on the
968 repo.
952 repo.
969
953
970 (We could be smarter here given the exact event that trigger the cache
954 (We could be smarter here given the exact event that trigger the cache
971 clearing)"""
955 clearing)"""
972 # only clear cache is there is obsstore data in this repo
956 # only clear cache is there is obsstore data in this repo
973 if 'obsstore' in repo._filecache:
957 if 'obsstore' in repo._filecache:
974 repo.obsstore.caches.clear()
958 repo.obsstore.caches.clear()
975
959
976 def _mutablerevs(repo):
960 def _mutablerevs(repo):
977 """the set of mutable revision in the repository"""
961 """the set of mutable revision in the repository"""
978 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
962 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
979
963
980 @cachefor('obsolete')
964 @cachefor('obsolete')
981 def _computeobsoleteset(repo):
965 def _computeobsoleteset(repo):
982 """the set of obsolete revisions"""
966 """the set of obsolete revisions"""
983 getnode = repo.changelog.node
967 getnode = repo.changelog.node
984 notpublic = _mutablerevs(repo)
968 notpublic = _mutablerevs(repo)
985 isobs = repo.obsstore.successors.__contains__
969 isobs = repo.obsstore.successors.__contains__
986 obs = set(r for r in notpublic if isobs(getnode(r)))
970 obs = set(r for r in notpublic if isobs(getnode(r)))
987 return obs
971 return obs
988
972
989 @cachefor('unstable')
973 @cachefor('unstable')
990 def _computeunstableset(repo):
974 def _computeunstableset(repo):
991 """the set of non obsolete revisions with obsolete parents"""
975 """the set of non obsolete revisions with obsolete parents"""
992 pfunc = repo.changelog.parentrevs
976 pfunc = repo.changelog.parentrevs
993 mutable = _mutablerevs(repo)
977 mutable = _mutablerevs(repo)
994 obsolete = getrevs(repo, 'obsolete')
978 obsolete = getrevs(repo, 'obsolete')
995 others = mutable - obsolete
979 others = mutable - obsolete
996 unstable = set()
980 unstable = set()
997 for r in sorted(others):
981 for r in sorted(others):
998 # A rev is unstable if one of its parent is obsolete or unstable
982 # A rev is unstable if one of its parent is obsolete or unstable
999 # this works since we traverse following growing rev order
983 # this works since we traverse following growing rev order
1000 for p in pfunc(r):
984 for p in pfunc(r):
1001 if p in obsolete or p in unstable:
985 if p in obsolete or p in unstable:
1002 unstable.add(r)
986 unstable.add(r)
1003 break
987 break
1004 return unstable
988 return unstable
1005
989
1006 @cachefor('suspended')
990 @cachefor('suspended')
1007 def _computesuspendedset(repo):
991 def _computesuspendedset(repo):
1008 """the set of obsolete parents with non obsolete descendants"""
992 """the set of obsolete parents with non obsolete descendants"""
1009 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
993 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
1010 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
994 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
1011
995
1012 @cachefor('extinct')
996 @cachefor('extinct')
1013 def _computeextinctset(repo):
997 def _computeextinctset(repo):
1014 """the set of obsolete parents without non obsolete descendants"""
998 """the set of obsolete parents without non obsolete descendants"""
1015 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
999 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
1016
1000
1017
1001
1018 @cachefor('bumped')
1002 @cachefor('bumped')
1019 def _computebumpedset(repo):
1003 def _computebumpedset(repo):
1020 """the set of revs trying to obsolete public revisions"""
1004 """the set of revs trying to obsolete public revisions"""
1021 bumped = set()
1005 bumped = set()
1022 # util function (avoid attribute lookup in the loop)
1006 # util function (avoid attribute lookup in the loop)
1023 phase = repo._phasecache.phase # would be faster to grab the full list
1007 phase = repo._phasecache.phase # would be faster to grab the full list
1024 public = phases.public
1008 public = phases.public
1025 cl = repo.changelog
1009 cl = repo.changelog
1026 torev = cl.nodemap.get
1010 torev = cl.nodemap.get
1027 for ctx in repo.set('(not public()) and (not obsolete())'):
1011 for ctx in repo.set('(not public()) and (not obsolete())'):
1028 rev = ctx.rev()
1012 rev = ctx.rev()
1029 # We only evaluate mutable, non-obsolete revision
1013 # We only evaluate mutable, non-obsolete revision
1030 node = ctx.node()
1014 node = ctx.node()
1031 # (future) A cache of precursors may worth if split is very common
1015 # (future) A cache of precursors may worth if split is very common
1032 for pnode in obsutil.allprecursors(repo.obsstore, [node],
1016 for pnode in obsutil.allprecursors(repo.obsstore, [node],
1033 ignoreflags=bumpedfix):
1017 ignoreflags=bumpedfix):
1034 prev = torev(pnode) # unfiltered! but so is phasecache
1018 prev = torev(pnode) # unfiltered! but so is phasecache
1035 if (prev is not None) and (phase(repo, prev) <= public):
1019 if (prev is not None) and (phase(repo, prev) <= public):
1036 # we have a public precursor
1020 # we have a public precursor
1037 bumped.add(rev)
1021 bumped.add(rev)
1038 break # Next draft!
1022 break # Next draft!
1039 return bumped
1023 return bumped
1040
1024
1041 @cachefor('divergent')
1025 @cachefor('divergent')
1042 def _computedivergentset(repo):
1026 def _computedivergentset(repo):
1043 """the set of rev that compete to be the final successors of some revision.
1027 """the set of rev that compete to be the final successors of some revision.
1044 """
1028 """
1045 divergent = set()
1029 divergent = set()
1046 obsstore = repo.obsstore
1030 obsstore = repo.obsstore
1047 newermap = {}
1031 newermap = {}
1048 for ctx in repo.set('(not public()) - obsolete()'):
1032 for ctx in repo.set('(not public()) - obsolete()'):
1049 mark = obsstore.precursors.get(ctx.node(), ())
1033 mark = obsstore.precursors.get(ctx.node(), ())
1050 toprocess = set(mark)
1034 toprocess = set(mark)
1051 seen = set()
1035 seen = set()
1052 while toprocess:
1036 while toprocess:
1053 prec = toprocess.pop()[0]
1037 prec = toprocess.pop()[0]
1054 if prec in seen:
1038 if prec in seen:
1055 continue # emergency cycle hanging prevention
1039 continue # emergency cycle hanging prevention
1056 seen.add(prec)
1040 seen.add(prec)
1057 if prec not in newermap:
1041 if prec not in newermap:
1058 obsutil.successorssets(repo, prec, newermap)
1042 obsutil.successorssets(repo, prec, newermap)
1059 newer = [n for n in newermap[prec] if n]
1043 newer = [n for n in newermap[prec] if n]
1060 if len(newer) > 1:
1044 if len(newer) > 1:
1061 divergent.add(ctx.rev())
1045 divergent.add(ctx.rev())
1062 break
1046 break
1063 toprocess.update(obsstore.precursors.get(prec, ()))
1047 toprocess.update(obsstore.precursors.get(prec, ()))
1064 return divergent
1048 return divergent
1065
1049
1066
1050
1067 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1051 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
1068 operation=None):
1052 operation=None):
1069 """Add obsolete markers between changesets in a repo
1053 """Add obsolete markers between changesets in a repo
1070
1054
1071 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1055 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1072 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1056 tuple. `old` and `news` are changectx. metadata is an optional dictionary
1073 containing metadata for this marker only. It is merged with the global
1057 containing metadata for this marker only. It is merged with the global
1074 metadata specified through the `metadata` argument of this function,
1058 metadata specified through the `metadata` argument of this function,
1075
1059
1076 Trying to obsolete a public changeset will raise an exception.
1060 Trying to obsolete a public changeset will raise an exception.
1077
1061
1078 Current user and date are used except if specified otherwise in the
1062 Current user and date are used except if specified otherwise in the
1079 metadata attribute.
1063 metadata attribute.
1080
1064
1081 This function operates within a transaction of its own, but does
1065 This function operates within a transaction of its own, but does
1082 not take any lock on the repo.
1066 not take any lock on the repo.
1083 """
1067 """
1084 # prepare metadata
1068 # prepare metadata
1085 if metadata is None:
1069 if metadata is None:
1086 metadata = {}
1070 metadata = {}
1087 if 'user' not in metadata:
1071 if 'user' not in metadata:
1088 metadata['user'] = repo.ui.username()
1072 metadata['user'] = repo.ui.username()
1089 useoperation = repo.ui.configbool('experimental',
1073 useoperation = repo.ui.configbool('experimental',
1090 'evolution.track-operation',
1074 'evolution.track-operation',
1091 False)
1075 False)
1092 if useoperation and operation:
1076 if useoperation and operation:
1093 metadata['operation'] = operation
1077 metadata['operation'] = operation
1094 tr = repo.transaction('add-obsolescence-marker')
1078 tr = repo.transaction('add-obsolescence-marker')
1095 try:
1079 try:
1096 markerargs = []
1080 markerargs = []
1097 for rel in relations:
1081 for rel in relations:
1098 prec = rel[0]
1082 prec = rel[0]
1099 sucs = rel[1]
1083 sucs = rel[1]
1100 localmetadata = metadata.copy()
1084 localmetadata = metadata.copy()
1101 if 2 < len(rel):
1085 if 2 < len(rel):
1102 localmetadata.update(rel[2])
1086 localmetadata.update(rel[2])
1103
1087
1104 if not prec.mutable():
1088 if not prec.mutable():
1105 raise error.Abort(_("cannot obsolete public changeset: %s")
1089 raise error.Abort(_("cannot obsolete public changeset: %s")
1106 % prec,
1090 % prec,
1107 hint="see 'hg help phases' for details")
1091 hint="see 'hg help phases' for details")
1108 nprec = prec.node()
1092 nprec = prec.node()
1109 nsucs = tuple(s.node() for s in sucs)
1093 nsucs = tuple(s.node() for s in sucs)
1110 npare = None
1094 npare = None
1111 if not nsucs:
1095 if not nsucs:
1112 npare = tuple(p.node() for p in prec.parents())
1096 npare = tuple(p.node() for p in prec.parents())
1113 if nprec in nsucs:
1097 if nprec in nsucs:
1114 raise error.Abort(_("changeset %s cannot obsolete itself")
1098 raise error.Abort(_("changeset %s cannot obsolete itself")
1115 % prec)
1099 % prec)
1116
1100
1117 # Creating the marker causes the hidden cache to become invalid,
1101 # Creating the marker causes the hidden cache to become invalid,
1118 # which causes recomputation when we ask for prec.parents() above.
1102 # which causes recomputation when we ask for prec.parents() above.
1119 # Resulting in n^2 behavior. So let's prepare all of the args
1103 # Resulting in n^2 behavior. So let's prepare all of the args
1120 # first, then create the markers.
1104 # first, then create the markers.
1121 markerargs.append((nprec, nsucs, npare, localmetadata))
1105 markerargs.append((nprec, nsucs, npare, localmetadata))
1122
1106
1123 for args in markerargs:
1107 for args in markerargs:
1124 nprec, nsucs, npare, localmetadata = args
1108 nprec, nsucs, npare, localmetadata = args
1125 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1109 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1126 date=date, metadata=localmetadata,
1110 date=date, metadata=localmetadata,
1127 ui=repo.ui)
1111 ui=repo.ui)
1128 repo.filteredrevcache.clear()
1112 repo.filteredrevcache.clear()
1129 tr.close()
1113 tr.close()
1130 finally:
1114 finally:
1131 tr.release()
1115 tr.release()
@@ -1,388 +1,409 b''
1 # obsutil.py - utility functions for obsolescence
1 # obsutil.py - utility functions for obsolescence
2 #
2 #
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
3 # Copyright 2017 Boris Feld <boris.feld@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 def closestpredecessors(repo, nodeid):
10 def closestpredecessors(repo, nodeid):
11 """yield the list of next predecessors pointing on visible changectx nodes
11 """yield the list of next predecessors pointing on visible changectx nodes
12
12
13 This function respect the repoview filtering, filtered revision will be
13 This function respect the repoview filtering, filtered revision will be
14 considered missing.
14 considered missing.
15 """
15 """
16
16
17 precursors = repo.obsstore.precursors
17 precursors = repo.obsstore.precursors
18 stack = [nodeid]
18 stack = [nodeid]
19 seen = set(stack)
19 seen = set(stack)
20
20
21 while stack:
21 while stack:
22 current = stack.pop()
22 current = stack.pop()
23 currentpreccs = precursors.get(current, ())
23 currentpreccs = precursors.get(current, ())
24
24
25 for prec in currentpreccs:
25 for prec in currentpreccs:
26 precnodeid = prec[0]
26 precnodeid = prec[0]
27
27
28 # Basic cycle protection
28 # Basic cycle protection
29 if precnodeid in seen:
29 if precnodeid in seen:
30 continue
30 continue
31 seen.add(precnodeid)
31 seen.add(precnodeid)
32
32
33 if precnodeid in repo:
33 if precnodeid in repo:
34 yield precnodeid
34 yield precnodeid
35 else:
35 else:
36 stack.append(precnodeid)
36 stack.append(precnodeid)
37
37
38 def allprecursors(obsstore, nodes, ignoreflags=0):
38 def allprecursors(obsstore, nodes, ignoreflags=0):
39 """Yield node for every precursors of <nodes>.
39 """Yield node for every precursors of <nodes>.
40
40
41 Some precursors may be unknown locally.
41 Some precursors may be unknown locally.
42
42
43 This is a linear yield unsuited to detecting folded changesets. It includes
43 This is a linear yield unsuited to detecting folded changesets. It includes
44 initial nodes too."""
44 initial nodes too."""
45
45
46 remaining = set(nodes)
46 remaining = set(nodes)
47 seen = set(remaining)
47 seen = set(remaining)
48 while remaining:
48 while remaining:
49 current = remaining.pop()
49 current = remaining.pop()
50 yield current
50 yield current
51 for mark in obsstore.precursors.get(current, ()):
51 for mark in obsstore.precursors.get(current, ()):
52 # ignore marker flagged with specified flag
52 # ignore marker flagged with specified flag
53 if mark[2] & ignoreflags:
53 if mark[2] & ignoreflags:
54 continue
54 continue
55 suc = mark[0]
55 suc = mark[0]
56 if suc not in seen:
56 if suc not in seen:
57 seen.add(suc)
57 seen.add(suc)
58 remaining.add(suc)
58 remaining.add(suc)
59
59
60 def allsuccessors(obsstore, nodes, ignoreflags=0):
61 """Yield node for every successor of <nodes>.
62
63 Some successors may be unknown locally.
64
65 This is a linear yield unsuited to detecting split changesets. It includes
66 initial nodes too."""
67 remaining = set(nodes)
68 seen = set(remaining)
69 while remaining:
70 current = remaining.pop()
71 yield current
72 for mark in obsstore.successors.get(current, ()):
73 # ignore marker flagged with specified flag
74 if mark[2] & ignoreflags:
75 continue
76 for suc in mark[1]:
77 if suc not in seen:
78 seen.add(suc)
79 remaining.add(suc)
80
60 def _filterprunes(markers):
81 def _filterprunes(markers):
61 """return a set with no prune markers"""
82 """return a set with no prune markers"""
62 return set(m for m in markers if m[1])
83 return set(m for m in markers if m[1])
63
84
64 def exclusivemarkers(repo, nodes):
85 def exclusivemarkers(repo, nodes):
65 """set of markers relevant to "nodes" but no other locally-known nodes
86 """set of markers relevant to "nodes" but no other locally-known nodes
66
87
67 This function compute the set of markers "exclusive" to a locally-known
88 This function compute the set of markers "exclusive" to a locally-known
68 node. This means we walk the markers starting from <nodes> until we reach a
89 node. This means we walk the markers starting from <nodes> until we reach a
69 locally-known precursors outside of <nodes>. Element of <nodes> with
90 locally-known precursors outside of <nodes>. Element of <nodes> with
70 locally-known successors outside of <nodes> are ignored (since their
91 locally-known successors outside of <nodes> are ignored (since their
71 precursors markers are also relevant to these successors).
92 precursors markers are also relevant to these successors).
72
93
73 For example:
94 For example:
74
95
75 # (A0 rewritten as A1)
96 # (A0 rewritten as A1)
76 #
97 #
77 # A0 <-1- A1 # Marker "1" is exclusive to A1
98 # A0 <-1- A1 # Marker "1" is exclusive to A1
78
99
79 or
100 or
80
101
81 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
102 # (A0 rewritten as AX; AX rewritten as A1; AX is unkown locally)
82 #
103 #
83 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
104 # <-1- A0 <-2- AX <-3- A1 # Marker "2,3" are exclusive to A1
84
105
85 or
106 or
86
107
87 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
108 # (A0 has unknown precursors, A0 rewritten as A1 and A2 (divergence))
88 #
109 #
89 # <-2- A1 # Marker "2" is exclusive to A0,A1
110 # <-2- A1 # Marker "2" is exclusive to A0,A1
90 # /
111 # /
91 # <-1- A0
112 # <-1- A0
92 # \
113 # \
93 # <-3- A2 # Marker "3" is exclusive to A0,A2
114 # <-3- A2 # Marker "3" is exclusive to A0,A2
94 #
115 #
95 # in addition:
116 # in addition:
96 #
117 #
97 # Markers "2,3" are exclusive to A1,A2
118 # Markers "2,3" are exclusive to A1,A2
98 # Markers "1,2,3" are exclusive to A0,A1,A2
119 # Markers "1,2,3" are exclusive to A0,A1,A2
99
120
100 See test/test-obsolete-bundle-strip.t for more examples.
121 See test/test-obsolete-bundle-strip.t for more examples.
101
122
102 An example usage is strip. When stripping a changeset, we also want to
123 An example usage is strip. When stripping a changeset, we also want to
103 strip the markers exclusive to this changeset. Otherwise we would have
124 strip the markers exclusive to this changeset. Otherwise we would have
104 "dangling"" obsolescence markers from its precursors: Obsolescence markers
125 "dangling"" obsolescence markers from its precursors: Obsolescence markers
105 marking a node as obsolete without any successors available locally.
126 marking a node as obsolete without any successors available locally.
106
127
107 As for relevant markers, the prune markers for children will be followed.
128 As for relevant markers, the prune markers for children will be followed.
108 Of course, they will only be followed if the pruned children is
129 Of course, they will only be followed if the pruned children is
109 locally-known. Since the prune markers are relevant to the pruned node.
130 locally-known. Since the prune markers are relevant to the pruned node.
110 However, while prune markers are considered relevant to the parent of the
131 However, while prune markers are considered relevant to the parent of the
111 pruned changesets, prune markers for locally-known changeset (with no
132 pruned changesets, prune markers for locally-known changeset (with no
112 successors) are considered exclusive to the pruned nodes. This allows
133 successors) are considered exclusive to the pruned nodes. This allows
113 to strip the prune markers (with the rest of the exclusive chain) alongside
134 to strip the prune markers (with the rest of the exclusive chain) alongside
114 the pruned changesets.
135 the pruned changesets.
115 """
136 """
116 # running on a filtered repository would be dangerous as markers could be
137 # running on a filtered repository would be dangerous as markers could be
117 # reported as exclusive when they are relevant for other filtered nodes.
138 # reported as exclusive when they are relevant for other filtered nodes.
118 unfi = repo.unfiltered()
139 unfi = repo.unfiltered()
119
140
120 # shortcut to various useful item
141 # shortcut to various useful item
121 nm = unfi.changelog.nodemap
142 nm = unfi.changelog.nodemap
122 precursorsmarkers = unfi.obsstore.precursors
143 precursorsmarkers = unfi.obsstore.precursors
123 successormarkers = unfi.obsstore.successors
144 successormarkers = unfi.obsstore.successors
124 childrenmarkers = unfi.obsstore.children
145 childrenmarkers = unfi.obsstore.children
125
146
126 # exclusive markers (return of the function)
147 # exclusive markers (return of the function)
127 exclmarkers = set()
148 exclmarkers = set()
128 # we need fast membership testing
149 # we need fast membership testing
129 nodes = set(nodes)
150 nodes = set(nodes)
130 # looking for head in the obshistory
151 # looking for head in the obshistory
131 #
152 #
132 # XXX we are ignoring all issues in regard with cycle for now.
153 # XXX we are ignoring all issues in regard with cycle for now.
133 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
154 stack = [n for n in nodes if not _filterprunes(successormarkers.get(n, ()))]
134 stack.sort()
155 stack.sort()
135 # nodes already stacked
156 # nodes already stacked
136 seennodes = set(stack)
157 seennodes = set(stack)
137 while stack:
158 while stack:
138 current = stack.pop()
159 current = stack.pop()
139 # fetch precursors markers
160 # fetch precursors markers
140 markers = list(precursorsmarkers.get(current, ()))
161 markers = list(precursorsmarkers.get(current, ()))
141 # extend the list with prune markers
162 # extend the list with prune markers
142 for mark in successormarkers.get(current, ()):
163 for mark in successormarkers.get(current, ()):
143 if not mark[1]:
164 if not mark[1]:
144 markers.append(mark)
165 markers.append(mark)
145 # and markers from children (looking for prune)
166 # and markers from children (looking for prune)
146 for mark in childrenmarkers.get(current, ()):
167 for mark in childrenmarkers.get(current, ()):
147 if not mark[1]:
168 if not mark[1]:
148 markers.append(mark)
169 markers.append(mark)
149 # traverse the markers
170 # traverse the markers
150 for mark in markers:
171 for mark in markers:
151 if mark in exclmarkers:
172 if mark in exclmarkers:
152 # markers already selected
173 # markers already selected
153 continue
174 continue
154
175
155 # If the markers is about the current node, select it
176 # If the markers is about the current node, select it
156 #
177 #
157 # (this delay the addition of markers from children)
178 # (this delay the addition of markers from children)
158 if mark[1] or mark[0] == current:
179 if mark[1] or mark[0] == current:
159 exclmarkers.add(mark)
180 exclmarkers.add(mark)
160
181
161 # should we keep traversing through the precursors?
182 # should we keep traversing through the precursors?
162 prec = mark[0]
183 prec = mark[0]
163
184
164 # nodes in the stack or already processed
185 # nodes in the stack or already processed
165 if prec in seennodes:
186 if prec in seennodes:
166 continue
187 continue
167
188
168 # is this a locally known node ?
189 # is this a locally known node ?
169 known = prec in nm
190 known = prec in nm
170 # if locally-known and not in the <nodes> set the traversal
191 # if locally-known and not in the <nodes> set the traversal
171 # stop here.
192 # stop here.
172 if known and prec not in nodes:
193 if known and prec not in nodes:
173 continue
194 continue
174
195
175 # do not keep going if there are unselected markers pointing to this
196 # do not keep going if there are unselected markers pointing to this
176 # nodes. If we end up traversing these unselected markers later the
197 # nodes. If we end up traversing these unselected markers later the
177 # node will be taken care of at that point.
198 # node will be taken care of at that point.
178 precmarkers = _filterprunes(successormarkers.get(prec))
199 precmarkers = _filterprunes(successormarkers.get(prec))
179 if precmarkers.issubset(exclmarkers):
200 if precmarkers.issubset(exclmarkers):
180 seennodes.add(prec)
201 seennodes.add(prec)
181 stack.append(prec)
202 stack.append(prec)
182
203
183 return exclmarkers
204 return exclmarkers
184
205
def successorssets(repo, initialnode, cache=None):
    """Compute the full list of successors sets for `initialnode`.

    A successors set of changeset A is a group of revisions that, taken
    together, supersede A; each member is only a partial replacement.
    Successors sets contain non-obsolete changesets only.

    Because a changeset may diverge, the result is a *list* of sets:

    - plain rewrite A -> A'         -> [(A',)]
    - pruned changeset              -> []
    - split A -> (A', B')           -> [(A', B')]
    - divergence A -> A', A -> A''  -> [(A',), (A'',)]
    - A not obsolete                -> [(A,)] (so a live changeset can be
      told apart from a pruned one)

    Successors unknown locally are treated as pruned (obsoleted without
    any successors).

    `cache` is an optional dictionary of precomputed successors sets; it is
    updated in place and its life span is the caller's responsibility. Code
    issuing multiple calls *must* share one cache or suffer terrible
    performance.
    """

    markers = repo.obsstore.successors

    # `pending` is the explicit call stack of the flattened recursion below;
    # `pendingset` mirrors it for O(1) cycle detection (every node pushed on
    # `pending` must be added here too).  Conceptually we are evaluating:
    #
    #     def successorssets(x):
    #         ss = [[]]
    #         for succ in directsuccessors(x):
    #             ss = product(ss, successorssets(succ))  # cartesian product
    #         return ss
    #
    # but plain recursion is not usable: it could blow the interpreter call
    # stack, and obsolescence markers may contain cycles.
    pending = [initialnode]
    pendingset = set(pending)
    if cache is None:
        cache = {}

    # Successors sets of every visited node end up in `cache`; once the loop
    # drains we answer the caller's node straight from it.
    while pending:
        # Each iteration examines the topmost node and lands in one of four
        # cases:
        #   (1) its successors sets are already cached  -> pop it
        #   (2) it is not obsolete                      -> it is its own set
        #   (3) some direct successor is uncomputed     -> stack that one
        #   (4) all direct successors are computed      -> combine them
        current = pending[-1]
        if current in cache:
            # case (1): already known, unwind.
            pendingset.remove(pending.pop())
        elif current not in markers:
            # case (2): the node is not obsolete.
            if current in repo:
                # Locally known: a valid final successor — itself.
                cache[current] = [(current,)]
            else:
                # Final version unknown locally: do not count it as a valid
                # successor (equivalent to pruned).
                cache[current] = []
        else:
            # cases (3) and (4).
            #
            # Phase 1 distinguishes them: scan every direct successor (over
            # all markers using `current` as precursor) for one whose
            # successors sets are still unknown.  Finding one means case (3):
            # push it and retry `current` later.  A successor already on the
            # stack is an obsolescence cycle; we arbitrarily declare it
            # pruned to break the loop.
            missing = None
            for mark in sorted(markers[current]):
                for suc in mark[1]:
                    if suc in cache:
                        continue
                    if suc in pendingset:
                        # cycle breaking: pretend the node is pruned.
                        cache[suc] = []
                    else:
                        missing = suc
                        break
                if missing is not None:
                    break

            if missing is not None:
                # case (3): "recurse" (iteratively) into the unknown
                # successor; all work on `current` stops for this iteration.
                pending.append(missing)
                pendingset.add(missing)
            else:
                # case (4): successors sets of all direct successors are
                # known, so `current`'s can now be assembled.
                #
                # Each distinct marker is one divergence branch of the
                # obsolescence history and contributes its own successors
                # sets.  Within a single marker, the (possibly divergent)
                # sets of the individual successors are combined with a
                # cartesian product.  Afterwards, duplicated entries and
                # sets that are strict subsets of another are dropped.
                allsets = []
                for mark in sorted(markers[current]):
                    # successors sets contributed by this marker alone
                    markss = [[]]
                    for suc in mark[1]:
                        expanded = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                merged = list(prefix)
                                for part in suffix:
                                    # no duplicate inside one successors
                                    # set — first occurrence wins.
                                    if part not in merged:
                                        merged.append(part)
                                expanded.append(merged)
                        markss = expanded
                    allsets.extend(markss)

                # Remove empties, duplicates and strict subsets: walk the
                # candidates from largest to smallest and keep only those
                # not contained in an already-kept set.
                candidates = sorted(((frozenset(s), s) for s in allsets if s),
                                    key=lambda pair: len(pair[1]),
                                    reverse=True)
                kept = []
                final = []
                for sset, slist in candidates:
                    if not any(sset.issubset(prev) for prev in kept):
                        final.append(slist)
                        kept.append(sset)
                final.reverse()  # put small successors sets first
                cache[current] = final
    return cache[initialnode]
General Comments 0
You need to be logged in to leave comments. Login now