py3: use "%d" to convert integers to bytes...
Pulkit Goyal
r36203:acc8e6e5 default
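For context, an editorial illustration (not part of this changeset) of the Python 3 behaviour behind the commit message: bytes %-formatting (PEP 461) accepts %d for integers, while %s with an int raises TypeError, and Mercurial's Python 3 port treats plain string literals in its source as bytes. The snippet below is illustrative only; "value" is a made-up name. It is what the ui.debug() change at source line 544 below relies on.

    value = 5
    b'next revision set to %d\n' % value    # -> b'next revision set to 5\n'
    # b'next revision set to %s\n' % value  # raises TypeError on Python 3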
@@ -1,1868 +1,1868 @@
1 # rebase.py - rebasing feature for mercurial
1 # rebase.py - rebasing feature for mercurial
2 #
2 #
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to move sets of revisions to a different ancestor
8 '''command to move sets of revisions to a different ancestor
9
9
10 This extension lets you rebase changesets in an existing Mercurial
10 This extension lets you rebase changesets in an existing Mercurial
11 repository.
11 repository.
12
12
13 For more information:
13 For more information:
14 https://mercurial-scm.org/wiki/RebaseExtension
14 https://mercurial-scm.org/wiki/RebaseExtension
15 '''
15 '''
16
16
17 from __future__ import absolute_import
17 from __future__ import absolute_import
18
18
19 import errno
19 import errno
20 import os
20 import os
21
21
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import (
23 from mercurial.node import (
24 nullid,
24 nullid,
25 nullrev,
25 nullrev,
26 short,
26 short,
27 )
27 )
28 from mercurial import (
28 from mercurial import (
29 bookmarks,
29 bookmarks,
30 cmdutil,
30 cmdutil,
31 commands,
31 commands,
32 copies,
32 copies,
33 destutil,
33 destutil,
34 dirstateguard,
34 dirstateguard,
35 error,
35 error,
36 extensions,
36 extensions,
37 hg,
37 hg,
38 lock,
38 lock,
39 merge as mergemod,
39 merge as mergemod,
40 mergeutil,
40 mergeutil,
41 obsolete,
41 obsolete,
42 obsutil,
42 obsutil,
43 patch,
43 patch,
44 phases,
44 phases,
45 pycompat,
45 pycompat,
46 registrar,
46 registrar,
47 repair,
47 repair,
48 revset,
48 revset,
49 revsetlang,
49 revsetlang,
50 scmutil,
50 scmutil,
51 smartset,
51 smartset,
52 util,
52 util,
53 )
53 )
54
54
55 release = lock.release
55 release = lock.release
56
56
57 # The following constants are used throughout the rebase module. The ordering of
57 # The following constants are used throughout the rebase module. The ordering of
58 # their values must be maintained.
58 # their values must be maintained.
59
59
60 # Indicates that a revision needs to be rebased
60 # Indicates that a revision needs to be rebased
61 revtodo = -1
61 revtodo = -1
62 revtodostr = '-1'
62 revtodostr = '-1'
63
63
64 # legacy revstates no longer needed in current code
64 # legacy revstates no longer needed in current code
65 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
65 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
66 legacystates = {'-2', '-3', '-4', '-5'}
66 legacystates = {'-2', '-3', '-4', '-5'}
67
67
68 cmdtable = {}
68 cmdtable = {}
69 command = registrar.command(cmdtable)
69 command = registrar.command(cmdtable)
70 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
70 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
71 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
71 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
72 # be specifying the version(s) of Mercurial they are tested with, or
72 # be specifying the version(s) of Mercurial they are tested with, or
73 # leave the attribute unspecified.
73 # leave the attribute unspecified.
74 testedwith = 'ships-with-hg-core'
74 testedwith = 'ships-with-hg-core'
75
75
76 def _nothingtorebase():
76 def _nothingtorebase():
77 return 1
77 return 1
78
78
79 def _savegraft(ctx, extra):
79 def _savegraft(ctx, extra):
80 s = ctx.extra().get('source', None)
80 s = ctx.extra().get('source', None)
81 if s is not None:
81 if s is not None:
82 extra['source'] = s
82 extra['source'] = s
83 s = ctx.extra().get('intermediate-source', None)
83 s = ctx.extra().get('intermediate-source', None)
84 if s is not None:
84 if s is not None:
85 extra['intermediate-source'] = s
85 extra['intermediate-source'] = s
86
86
87 def _savebranch(ctx, extra):
87 def _savebranch(ctx, extra):
88 extra['branch'] = ctx.branch()
88 extra['branch'] = ctx.branch()
89
89
90 def _makeextrafn(copiers):
90 def _makeextrafn(copiers):
91 """make an extrafn out of the given copy-functions.
91 """make an extrafn out of the given copy-functions.
92
92
93 A copy function takes a context and an extra dict, and mutates the
93 A copy function takes a context and an extra dict, and mutates the
94 extra dict as needed based on the given context.
94 extra dict as needed based on the given context.
95 """
95 """
96 def extrafn(ctx, extra):
96 def extrafn(ctx, extra):
97 for c in copiers:
97 for c in copiers:
98 c(ctx, extra)
98 c(ctx, extra)
99 return extrafn
99 return extrafn
100
100
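# Editorial sketch, not part of this changeset: how the copier helpers above
# compose via _makeextrafn. FakeCtx is a made-up stand-in exposing only the
# two methods the copiers actually call (extra() and branch()).
def _copierexample():
    class FakeCtx(object):
        def extra(self):
            return {'source': 'aaaa', 'intermediate-source': 'bbbb'}
        def branch(self):
            return 'stable'
    extra = {}
    extrafn = _makeextrafn([_savegraft, _savebranch])
    extrafn(FakeCtx(), extra)
    # extra == {'source': 'aaaa', 'intermediate-source': 'bbbb',
    #           'branch': 'stable'}
    return extra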
101 def _destrebase(repo, sourceset, destspace=None):
101 def _destrebase(repo, sourceset, destspace=None):
102 """small wrapper around destmerge to pass the right extra args
102 """small wrapper around destmerge to pass the right extra args
103
103
104 Please wrap destutil.destmerge instead."""
104 Please wrap destutil.destmerge instead."""
105 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
105 return destutil.destmerge(repo, action='rebase', sourceset=sourceset,
106 onheadcheck=False, destspace=destspace)
106 onheadcheck=False, destspace=destspace)
107
107
108 revsetpredicate = registrar.revsetpredicate()
108 revsetpredicate = registrar.revsetpredicate()
109
109
110 @revsetpredicate('_destrebase')
110 @revsetpredicate('_destrebase')
111 def _revsetdestrebase(repo, subset, x):
111 def _revsetdestrebase(repo, subset, x):
112 # ``_rebasedefaultdest()``
112 # ``_rebasedefaultdest()``
113
113
114 # default destination for rebase.
114 # default destination for rebase.
115 # # XXX: Currently private because I expect the signature to change.
115 # # XXX: Currently private because I expect the signature to change.
116 # # XXX: - bailing out in case of ambiguity vs returning all data.
116 # # XXX: - bailing out in case of ambiguity vs returning all data.
117 # i18n: "_rebasedefaultdest" is a keyword
117 # i18n: "_rebasedefaultdest" is a keyword
118 sourceset = None
118 sourceset = None
119 if x is not None:
119 if x is not None:
120 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
120 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
121 return subset & smartset.baseset([_destrebase(repo, sourceset)])
121 return subset & smartset.baseset([_destrebase(repo, sourceset)])
122
122
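# Editorial sketch (hypothetical helper, not part of this changeset): the
# predicate registered above can be queried like any other revset, which is
# how other code could obtain the destination "hg rebase" would pick:
def _exampledefaultdest(repo):
    return repo.revs('_destrebase()').first()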
123 def _ctxdesc(ctx):
123 def _ctxdesc(ctx):
124 """short description for a context"""
124 """short description for a context"""
125 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
125 desc = '%d:%s "%s"' % (ctx.rev(), ctx,
126 ctx.description().split('\n', 1)[0])
126 ctx.description().split('\n', 1)[0])
127 repo = ctx.repo()
127 repo = ctx.repo()
128 names = []
128 names = []
129 for nsname, ns in repo.names.iteritems():
129 for nsname, ns in repo.names.iteritems():
130 if nsname == 'branches':
130 if nsname == 'branches':
131 continue
131 continue
132 names.extend(ns.names(repo, ctx.node()))
132 names.extend(ns.names(repo, ctx.node()))
133 if names:
133 if names:
134 desc += ' (%s)' % ' '.join(names)
134 desc += ' (%s)' % ' '.join(names)
135 return desc
135 return desc
136
136
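# Editorial note (made-up values): _ctxdesc above yields strings of the form
#
#     34:abcdef012345 "first line of the description" (somebookmark tip)
#
# i.e. revision number, short hash, quoted first description line, and any
# non-branch names (bookmarks, tags) in parentheses.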
137 class rebaseruntime(object):
137 class rebaseruntime(object):
138 """This class is a container for rebase runtime state"""
138 """This class is a container for rebase runtime state"""
139 def __init__(self, repo, ui, inmemory=False, opts=None):
139 def __init__(self, repo, ui, inmemory=False, opts=None):
140 if opts is None:
140 if opts is None:
141 opts = {}
141 opts = {}
142
142
143 # prepared: whether we have rebasestate prepared or not. Currently it
143 # prepared: whether we have rebasestate prepared or not. Currently it
144 # decides whether "self.repo" is unfiltered or not.
144 # decides whether "self.repo" is unfiltered or not.
145 # The rebasestate records explicit hash-to-hash instructions that do not
145 # The rebasestate records explicit hash-to-hash instructions that do not
146 # depend on visibility. If rebasestate exists (in-memory or on-disk), use
146 # depend on visibility. If rebasestate exists (in-memory or on-disk), use
147 # unfiltered repo to avoid visibility issues.
147 # unfiltered repo to avoid visibility issues.
148 # Before knowing rebasestate (i.e. when starting a new rebase (not
148 # Before knowing rebasestate (i.e. when starting a new rebase (not
149 # --continue or --abort)), the original repo should be used so
149 # --continue or --abort)), the original repo should be used so
150 # visibility-dependent revsets are correct.
150 # visibility-dependent revsets are correct.
151 self.prepared = False
151 self.prepared = False
152 self._repo = repo
152 self._repo = repo
153
153
154 self.ui = ui
154 self.ui = ui
155 self.opts = opts
155 self.opts = opts
156 self.originalwd = None
156 self.originalwd = None
157 self.external = nullrev
157 self.external = nullrev
158 # Mapping from each old revision id to either its new rebased revision
158 # Mapping from each old revision id to either its new rebased revision
159 # or a constant describing what needs to be done with it. This state
159 # or a constant describing what needs to be done with it. This state
160 # dict holds most of the rebase progress state.
160 # dict holds most of the rebase progress state.
161 self.state = {}
161 self.state = {}
162 self.activebookmark = None
162 self.activebookmark = None
163 self.destmap = {}
163 self.destmap = {}
164 self.skipped = set()
164 self.skipped = set()
165
165
166 self.collapsef = opts.get('collapse', False)
166 self.collapsef = opts.get('collapse', False)
167 self.collapsemsg = cmdutil.logmessage(ui, opts)
167 self.collapsemsg = cmdutil.logmessage(ui, opts)
168 self.date = opts.get('date', None)
168 self.date = opts.get('date', None)
169
169
170 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
170 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
171 self.extrafns = [_savegraft]
171 self.extrafns = [_savegraft]
172 if e:
172 if e:
173 self.extrafns = [e]
173 self.extrafns = [e]
174
174
175 self.keepf = opts.get('keep', False)
175 self.keepf = opts.get('keep', False)
176 self.keepbranchesf = opts.get('keepbranches', False)
176 self.keepbranchesf = opts.get('keepbranches', False)
177 # keepopen is not meant for use on the command line, but by
177 # keepopen is not meant for use on the command line, but by
178 # other extensions
178 # other extensions
179 self.keepopen = opts.get('keepopen', False)
179 self.keepopen = opts.get('keepopen', False)
180 self.obsoletenotrebased = {}
180 self.obsoletenotrebased = {}
181 self.obsoletewithoutsuccessorindestination = set()
181 self.obsoletewithoutsuccessorindestination = set()
182 self.inmemory = inmemory
182 self.inmemory = inmemory
183
183
184 @property
184 @property
185 def repo(self):
185 def repo(self):
186 if self.prepared:
186 if self.prepared:
187 return self._repo.unfiltered()
187 return self._repo.unfiltered()
188 else:
188 else:
189 return self._repo
189 return self._repo
190
190
191 def storestatus(self, tr=None):
191 def storestatus(self, tr=None):
192 """Store the current status to allow recovery"""
192 """Store the current status to allow recovery"""
193 if tr:
193 if tr:
194 tr.addfilegenerator('rebasestate', ('rebasestate',),
194 tr.addfilegenerator('rebasestate', ('rebasestate',),
195 self._writestatus, location='plain')
195 self._writestatus, location='plain')
196 else:
196 else:
197 with self.repo.vfs("rebasestate", "w") as f:
197 with self.repo.vfs("rebasestate", "w") as f:
198 self._writestatus(f)
198 self._writestatus(f)
199
199
200 def _writestatus(self, f):
200 def _writestatus(self, f):
201 repo = self.repo
201 repo = self.repo
202 assert repo.filtername is None
202 assert repo.filtername is None
203 f.write(repo[self.originalwd].hex() + '\n')
203 f.write(repo[self.originalwd].hex() + '\n')
204 # was "dest". we now write dest per src root below.
204 # was "dest". we now write dest per src root below.
205 f.write('\n')
205 f.write('\n')
206 f.write(repo[self.external].hex() + '\n')
206 f.write(repo[self.external].hex() + '\n')
207 f.write('%d\n' % int(self.collapsef))
207 f.write('%d\n' % int(self.collapsef))
208 f.write('%d\n' % int(self.keepf))
208 f.write('%d\n' % int(self.keepf))
209 f.write('%d\n' % int(self.keepbranchesf))
209 f.write('%d\n' % int(self.keepbranchesf))
210 f.write('%s\n' % (self.activebookmark or ''))
210 f.write('%s\n' % (self.activebookmark or ''))
211 destmap = self.destmap
211 destmap = self.destmap
212 for d, v in self.state.iteritems():
212 for d, v in self.state.iteritems():
213 oldrev = repo[d].hex()
213 oldrev = repo[d].hex()
214 if v >= 0:
214 if v >= 0:
215 newrev = repo[v].hex()
215 newrev = repo[v].hex()
216 else:
216 else:
217 newrev = "%d" % v
217 newrev = "%d" % v
218 destnode = repo[destmap[d]].hex()
218 destnode = repo[destmap[d]].hex()
219 f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
219 f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
220 repo.ui.debug('rebase status stored\n')
220 repo.ui.debug('rebase status stored\n')
221
221
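# Editorial summary (not part of this changeset) of the .hg/rebasestate layout
# written by _writestatus above and parsed by restorestatus below:
#
#     line 0: hex node of the original working directory parent
#     line 1: empty (legacy single-destination field, kept for old clients)
#     line 2: hex node of the external parent
#     line 3: 0 or 1, collapse flag
#     line 4: 0 or 1, keep flag
#     line 5: 0 or 1, keepbranches flag
#     line 6: active bookmark name, or empty
#     then one "oldnode:newnode:destnode" line per revision being rebased,
#     where newnode may instead be "-1" (revtodo) or a legacy negative state.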
222 def restorestatus(self):
222 def restorestatus(self):
223 """Restore a previously stored status"""
223 """Restore a previously stored status"""
224 self.prepared = True
224 self.prepared = True
225 repo = self.repo
225 repo = self.repo
226 assert repo.filtername is None
226 assert repo.filtername is None
227 keepbranches = None
227 keepbranches = None
228 legacydest = None
228 legacydest = None
229 collapse = False
229 collapse = False
230 external = nullrev
230 external = nullrev
231 activebookmark = None
231 activebookmark = None
232 state = {}
232 state = {}
233 destmap = {}
233 destmap = {}
234
234
235 try:
235 try:
236 f = repo.vfs("rebasestate")
236 f = repo.vfs("rebasestate")
237 for i, l in enumerate(f.read().splitlines()):
237 for i, l in enumerate(f.read().splitlines()):
238 if i == 0:
238 if i == 0:
239 originalwd = repo[l].rev()
239 originalwd = repo[l].rev()
240 elif i == 1:
240 elif i == 1:
241 # this line should be empty in newer versions, but legacy
241 # this line should be empty in newer versions, but legacy
242 # clients may still use it
242 # clients may still use it
243 if l:
243 if l:
244 legacydest = repo[l].rev()
244 legacydest = repo[l].rev()
245 elif i == 2:
245 elif i == 2:
246 external = repo[l].rev()
246 external = repo[l].rev()
247 elif i == 3:
247 elif i == 3:
248 collapse = bool(int(l))
248 collapse = bool(int(l))
249 elif i == 4:
249 elif i == 4:
250 keep = bool(int(l))
250 keep = bool(int(l))
251 elif i == 5:
251 elif i == 5:
252 keepbranches = bool(int(l))
252 keepbranches = bool(int(l))
253 elif i == 6 and not (len(l) == 81 and ':' in l):
253 elif i == 6 and not (len(l) == 81 and ':' in l):
254 # line 6 is a recent addition, so for backwards
254 # line 6 is a recent addition, so for backwards
255 # compatibility check that the line doesn't look like the
255 # compatibility check that the line doesn't look like the
256 # oldrev:newrev lines
256 # oldrev:newrev lines
257 activebookmark = l
257 activebookmark = l
258 else:
258 else:
259 args = l.split(':')
259 args = l.split(':')
260 oldrev = args[0]
260 oldrev = args[0]
261 newrev = args[1]
261 newrev = args[1]
262 if newrev in legacystates:
262 if newrev in legacystates:
263 continue
263 continue
264 if len(args) > 2:
264 if len(args) > 2:
265 destnode = args[2]
265 destnode = args[2]
266 else:
266 else:
267 destnode = legacydest
267 destnode = legacydest
268 destmap[repo[oldrev].rev()] = repo[destnode].rev()
268 destmap[repo[oldrev].rev()] = repo[destnode].rev()
269 if newrev in (nullid, revtodostr):
269 if newrev in (nullid, revtodostr):
270 state[repo[oldrev].rev()] = revtodo
270 state[repo[oldrev].rev()] = revtodo
271 # Legacy compat special case
271 # Legacy compat special case
272 else:
272 else:
273 state[repo[oldrev].rev()] = repo[newrev].rev()
273 state[repo[oldrev].rev()] = repo[newrev].rev()
274
274
275 except IOError as err:
275 except IOError as err:
276 if err.errno != errno.ENOENT:
276 if err.errno != errno.ENOENT:
277 raise
277 raise
278 cmdutil.wrongtooltocontinue(repo, _('rebase'))
278 cmdutil.wrongtooltocontinue(repo, _('rebase'))
279
279
280 if keepbranches is None:
280 if keepbranches is None:
281 raise error.Abort(_('.hg/rebasestate is incomplete'))
281 raise error.Abort(_('.hg/rebasestate is incomplete'))
282
282
283 skipped = set()
283 skipped = set()
284 # recompute the set of skipped revs
284 # recompute the set of skipped revs
285 if not collapse:
285 if not collapse:
286 seen = set(destmap.values())
286 seen = set(destmap.values())
287 for old, new in sorted(state.items()):
287 for old, new in sorted(state.items()):
288 if new != revtodo and new in seen:
288 if new != revtodo and new in seen:
289 skipped.add(old)
289 skipped.add(old)
290 seen.add(new)
290 seen.add(new)
291 repo.ui.debug('computed skipped revs: %s\n' %
291 repo.ui.debug('computed skipped revs: %s\n' %
292 (' '.join('%d' % r for r in sorted(skipped)) or ''))
292 (' '.join('%d' % r for r in sorted(skipped)) or ''))
293 repo.ui.debug('rebase status resumed\n')
293 repo.ui.debug('rebase status resumed\n')
294
294
295 self.originalwd = originalwd
295 self.originalwd = originalwd
296 self.destmap = destmap
296 self.destmap = destmap
297 self.state = state
297 self.state = state
298 self.skipped = skipped
298 self.skipped = skipped
299 self.collapsef = collapse
299 self.collapsef = collapse
300 self.keepf = keep
300 self.keepf = keep
301 self.keepbranchesf = keepbranches
301 self.keepbranchesf = keepbranches
302 self.external = external
302 self.external = external
303 self.activebookmark = activebookmark
303 self.activebookmark = activebookmark
304
304
305 def _handleskippingobsolete(self, obsoleterevs, destmap):
305 def _handleskippingobsolete(self, obsoleterevs, destmap):
306 """Compute structures necessary for skipping obsolete revisions
306 """Compute structures necessary for skipping obsolete revisions
307
307
308 obsoleterevs: iterable of all obsolete revisions in rebaseset
308 obsoleterevs: iterable of all obsolete revisions in rebaseset
309 destmap: {srcrev: destrev} destination revisions
309 destmap: {srcrev: destrev} destination revisions
310 """
310 """
311 self.obsoletenotrebased = {}
311 self.obsoletenotrebased = {}
312 if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
312 if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
313 return
313 return
314 obsoleteset = set(obsoleterevs)
314 obsoleteset = set(obsoleterevs)
315 (self.obsoletenotrebased,
315 (self.obsoletenotrebased,
316 self.obsoletewithoutsuccessorindestination,
316 self.obsoletewithoutsuccessorindestination,
317 obsoleteextinctsuccessors) = _computeobsoletenotrebased(
317 obsoleteextinctsuccessors) = _computeobsoletenotrebased(
318 self.repo, obsoleteset, destmap)
318 self.repo, obsoleteset, destmap)
319 skippedset = set(self.obsoletenotrebased)
319 skippedset = set(self.obsoletenotrebased)
320 skippedset.update(self.obsoletewithoutsuccessorindestination)
320 skippedset.update(self.obsoletewithoutsuccessorindestination)
321 skippedset.update(obsoleteextinctsuccessors)
321 skippedset.update(obsoleteextinctsuccessors)
322 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
322 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
323
323
324 def _prepareabortorcontinue(self, isabort):
324 def _prepareabortorcontinue(self, isabort):
325 try:
325 try:
326 self.restorestatus()
326 self.restorestatus()
327 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
327 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
328 except error.RepoLookupError:
328 except error.RepoLookupError:
329 if isabort:
329 if isabort:
330 clearstatus(self.repo)
330 clearstatus(self.repo)
331 clearcollapsemsg(self.repo)
331 clearcollapsemsg(self.repo)
332 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
332 self.repo.ui.warn(_('rebase aborted (no revision is removed,'
333 ' only broken state is cleared)\n'))
333 ' only broken state is cleared)\n'))
334 return 0
334 return 0
335 else:
335 else:
336 msg = _('cannot continue inconsistent rebase')
336 msg = _('cannot continue inconsistent rebase')
337 hint = _('use "hg rebase --abort" to clear broken state')
337 hint = _('use "hg rebase --abort" to clear broken state')
338 raise error.Abort(msg, hint=hint)
338 raise error.Abort(msg, hint=hint)
339 if isabort:
339 if isabort:
340 return abort(self.repo, self.originalwd, self.destmap,
340 return abort(self.repo, self.originalwd, self.destmap,
341 self.state, activebookmark=self.activebookmark)
341 self.state, activebookmark=self.activebookmark)
342
342
343 def _preparenewrebase(self, destmap):
343 def _preparenewrebase(self, destmap):
344 if not destmap:
344 if not destmap:
345 return _nothingtorebase()
345 return _nothingtorebase()
346
346
347 rebaseset = destmap.keys()
347 rebaseset = destmap.keys()
348 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
348 allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
349 if (not (self.keepf or allowunstable)
349 if (not (self.keepf or allowunstable)
350 and self.repo.revs('first(children(%ld) - %ld)',
350 and self.repo.revs('first(children(%ld) - %ld)',
351 rebaseset, rebaseset)):
351 rebaseset, rebaseset)):
352 raise error.Abort(
352 raise error.Abort(
353 _("can't remove original changesets with"
353 _("can't remove original changesets with"
354 " unrebased descendants"),
354 " unrebased descendants"),
355 hint=_('use --keep to keep original changesets'))
355 hint=_('use --keep to keep original changesets'))
356
356
357 result = buildstate(self.repo, destmap, self.collapsef)
357 result = buildstate(self.repo, destmap, self.collapsef)
358
358
359 if not result:
359 if not result:
360 # Empty state built, nothing to rebase
360 # Empty state built, nothing to rebase
361 self.ui.status(_('nothing to rebase\n'))
361 self.ui.status(_('nothing to rebase\n'))
362 return _nothingtorebase()
362 return _nothingtorebase()
363
363
364 for root in self.repo.set('roots(%ld)', rebaseset):
364 for root in self.repo.set('roots(%ld)', rebaseset):
365 if not self.keepf and not root.mutable():
365 if not self.keepf and not root.mutable():
366 raise error.Abort(_("can't rebase public changeset %s")
366 raise error.Abort(_("can't rebase public changeset %s")
367 % root,
367 % root,
368 hint=_("see 'hg help phases' for details"))
368 hint=_("see 'hg help phases' for details"))
369
369
370 (self.originalwd, self.destmap, self.state) = result
370 (self.originalwd, self.destmap, self.state) = result
371 if self.collapsef:
371 if self.collapsef:
372 dests = set(self.destmap.values())
372 dests = set(self.destmap.values())
373 if len(dests) != 1:
373 if len(dests) != 1:
374 raise error.Abort(
374 raise error.Abort(
375 _('--collapse does not work with multiple destinations'))
375 _('--collapse does not work with multiple destinations'))
376 destrev = next(iter(dests))
376 destrev = next(iter(dests))
377 destancestors = self.repo.changelog.ancestors([destrev],
377 destancestors = self.repo.changelog.ancestors([destrev],
378 inclusive=True)
378 inclusive=True)
379 self.external = externalparent(self.repo, self.state, destancestors)
379 self.external = externalparent(self.repo, self.state, destancestors)
380
380
381 for destrev in sorted(set(destmap.values())):
381 for destrev in sorted(set(destmap.values())):
382 dest = self.repo[destrev]
382 dest = self.repo[destrev]
383 if dest.closesbranch() and not self.keepbranchesf:
383 if dest.closesbranch() and not self.keepbranchesf:
384 self.ui.status(_('reopening closed branch head %s\n') % dest)
384 self.ui.status(_('reopening closed branch head %s\n') % dest)
385
385
386 self.prepared = True
386 self.prepared = True
387
387
388 def _assignworkingcopy(self):
388 def _assignworkingcopy(self):
389 if self.inmemory:
389 if self.inmemory:
390 from mercurial.context import overlayworkingctx
390 from mercurial.context import overlayworkingctx
391 self.wctx = overlayworkingctx(self.repo)
391 self.wctx = overlayworkingctx(self.repo)
392 self.repo.ui.debug("rebasing in-memory\n")
392 self.repo.ui.debug("rebasing in-memory\n")
393 else:
393 else:
394 self.wctx = self.repo[None]
394 self.wctx = self.repo[None]
395 self.repo.ui.debug("rebasing on disk\n")
395 self.repo.ui.debug("rebasing on disk\n")
396 self.repo.ui.log("rebase", "", rebase_imm_used=self.wctx.isinmemory())
396 self.repo.ui.log("rebase", "", rebase_imm_used=self.wctx.isinmemory())
397
397
398 def _performrebase(self, tr):
398 def _performrebase(self, tr):
399 self._assignworkingcopy()
399 self._assignworkingcopy()
400 repo, ui = self.repo, self.ui
400 repo, ui = self.repo, self.ui
401 if self.keepbranchesf:
401 if self.keepbranchesf:
402 # insert _savebranch at the start of extrafns so if
402 # insert _savebranch at the start of extrafns so if
403 # there's a user-provided extrafn it can clobber branch if
403 # there's a user-provided extrafn it can clobber branch if
404 # desired
404 # desired
405 self.extrafns.insert(0, _savebranch)
405 self.extrafns.insert(0, _savebranch)
406 if self.collapsef:
406 if self.collapsef:
407 branches = set()
407 branches = set()
408 for rev in self.state:
408 for rev in self.state:
409 branches.add(repo[rev].branch())
409 branches.add(repo[rev].branch())
410 if len(branches) > 1:
410 if len(branches) > 1:
411 raise error.Abort(_('cannot collapse multiple named '
411 raise error.Abort(_('cannot collapse multiple named '
412 'branches'))
412 'branches'))
413
413
414 # Calculate self.obsoletenotrebased
414 # Calculate self.obsoletenotrebased
415 obsrevs = _filterobsoleterevs(self.repo, self.state)
415 obsrevs = _filterobsoleterevs(self.repo, self.state)
416 self._handleskippingobsolete(obsrevs, self.destmap)
416 self._handleskippingobsolete(obsrevs, self.destmap)
417
417
418 # Keep track of the active bookmarks in order to reset them later
418 # Keep track of the active bookmarks in order to reset them later
419 self.activebookmark = self.activebookmark or repo._activebookmark
419 self.activebookmark = self.activebookmark or repo._activebookmark
420 if self.activebookmark:
420 if self.activebookmark:
421 bookmarks.deactivate(repo)
421 bookmarks.deactivate(repo)
422
422
423 # Store the state before we begin so users can run 'hg rebase --abort'
423 # Store the state before we begin so users can run 'hg rebase --abort'
424 # if we fail before the transaction closes.
424 # if we fail before the transaction closes.
425 self.storestatus()
425 self.storestatus()
426
426
427 cands = [k for k, v in self.state.iteritems() if v == revtodo]
427 cands = [k for k, v in self.state.iteritems() if v == revtodo]
428 total = len(cands)
428 total = len(cands)
429 pos = 0
429 pos = 0
430 for subset in sortsource(self.destmap):
430 for subset in sortsource(self.destmap):
431 pos = self._performrebasesubset(tr, subset, pos, total)
431 pos = self._performrebasesubset(tr, subset, pos, total)
432 ui.progress(_('rebasing'), None)
432 ui.progress(_('rebasing'), None)
433 ui.note(_('rebase merging completed\n'))
433 ui.note(_('rebase merging completed\n'))
434
434
435 def _performrebasesubset(self, tr, subset, pos, total):
435 def _performrebasesubset(self, tr, subset, pos, total):
436 repo, ui, opts = self.repo, self.ui, self.opts
436 repo, ui, opts = self.repo, self.ui, self.opts
437 sortedrevs = repo.revs('sort(%ld, -topo)', subset)
437 sortedrevs = repo.revs('sort(%ld, -topo)', subset)
438 allowdivergence = self.ui.configbool(
438 allowdivergence = self.ui.configbool(
439 'experimental', 'evolution.allowdivergence')
439 'experimental', 'evolution.allowdivergence')
440 if not allowdivergence:
440 if not allowdivergence:
441 sortedrevs -= repo.revs(
441 sortedrevs -= repo.revs(
442 'descendants(%ld) and not %ld',
442 'descendants(%ld) and not %ld',
443 self.obsoletewithoutsuccessorindestination,
443 self.obsoletewithoutsuccessorindestination,
444 self.obsoletewithoutsuccessorindestination,
444 self.obsoletewithoutsuccessorindestination,
445 )
445 )
446 for rev in sortedrevs:
446 for rev in sortedrevs:
447 dest = self.destmap[rev]
447 dest = self.destmap[rev]
448 ctx = repo[rev]
448 ctx = repo[rev]
449 desc = _ctxdesc(ctx)
449 desc = _ctxdesc(ctx)
450 if self.state[rev] == rev:
450 if self.state[rev] == rev:
451 ui.status(_('already rebased %s\n') % desc)
451 ui.status(_('already rebased %s\n') % desc)
452 elif (not allowdivergence
452 elif (not allowdivergence
453 and rev in self.obsoletewithoutsuccessorindestination):
453 and rev in self.obsoletewithoutsuccessorindestination):
454 msg = _('note: not rebasing %s and its descendants as '
454 msg = _('note: not rebasing %s and its descendants as '
455 'this would cause divergence\n') % desc
455 'this would cause divergence\n') % desc
456 repo.ui.status(msg)
456 repo.ui.status(msg)
457 self.skipped.add(rev)
457 self.skipped.add(rev)
458 elif rev in self.obsoletenotrebased:
458 elif rev in self.obsoletenotrebased:
459 succ = self.obsoletenotrebased[rev]
459 succ = self.obsoletenotrebased[rev]
460 if succ is None:
460 if succ is None:
461 msg = _('note: not rebasing %s, it has no '
461 msg = _('note: not rebasing %s, it has no '
462 'successor\n') % desc
462 'successor\n') % desc
463 else:
463 else:
464 succdesc = _ctxdesc(repo[succ])
464 succdesc = _ctxdesc(repo[succ])
465 msg = (_('note: not rebasing %s, already in '
465 msg = (_('note: not rebasing %s, already in '
466 'destination as %s\n') % (desc, succdesc))
466 'destination as %s\n') % (desc, succdesc))
467 repo.ui.status(msg)
467 repo.ui.status(msg)
468 # Make clearrebased aware that state[rev] is not a true successor
468 # Make clearrebased aware that state[rev] is not a true successor
469 self.skipped.add(rev)
469 self.skipped.add(rev)
470 # Record rev as moved to its desired destination in self.state.
470 # Record rev as moved to its desired destination in self.state.
471 # This helps bookmark and working parent movement.
471 # This helps bookmark and working parent movement.
472 dest = max(adjustdest(repo, rev, self.destmap, self.state,
472 dest = max(adjustdest(repo, rev, self.destmap, self.state,
473 self.skipped))
473 self.skipped))
474 self.state[rev] = dest
474 self.state[rev] = dest
475 elif self.state[rev] == revtodo:
475 elif self.state[rev] == revtodo:
476 pos += 1
476 pos += 1
477 ui.status(_('rebasing %s\n') % desc)
477 ui.status(_('rebasing %s\n') % desc)
478 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
478 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, ctx)),
479 _('changesets'), total)
479 _('changesets'), total)
480 p1, p2, base = defineparents(repo, rev, self.destmap,
480 p1, p2, base = defineparents(repo, rev, self.destmap,
481 self.state, self.skipped,
481 self.state, self.skipped,
482 self.obsoletenotrebased)
482 self.obsoletenotrebased)
483 self.storestatus(tr=tr)
483 self.storestatus(tr=tr)
484 storecollapsemsg(repo, self.collapsemsg)
484 storecollapsemsg(repo, self.collapsemsg)
485 if len(repo[None].parents()) == 2:
485 if len(repo[None].parents()) == 2:
486 repo.ui.debug('resuming interrupted rebase\n')
486 repo.ui.debug('resuming interrupted rebase\n')
487 else:
487 else:
488 try:
488 try:
489 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
489 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
490 'rebase')
490 'rebase')
491 stats = rebasenode(repo, rev, p1, base, self.state,
491 stats = rebasenode(repo, rev, p1, base, self.state,
492 self.collapsef, dest, wctx=self.wctx)
492 self.collapsef, dest, wctx=self.wctx)
493 if stats and stats[3] > 0:
493 if stats and stats[3] > 0:
494 if self.wctx.isinmemory():
494 if self.wctx.isinmemory():
495 raise error.InMemoryMergeConflictsError()
495 raise error.InMemoryMergeConflictsError()
496 else:
496 else:
497 raise error.InterventionRequired(
497 raise error.InterventionRequired(
498 _('unresolved conflicts (see hg '
498 _('unresolved conflicts (see hg '
499 'resolve, then hg rebase --continue)'))
499 'resolve, then hg rebase --continue)'))
500 finally:
500 finally:
501 ui.setconfig('ui', 'forcemerge', '', 'rebase')
501 ui.setconfig('ui', 'forcemerge', '', 'rebase')
502 if not self.collapsef:
502 if not self.collapsef:
503 merging = p2 != nullrev
503 merging = p2 != nullrev
504 editform = cmdutil.mergeeditform(merging, 'rebase')
504 editform = cmdutil.mergeeditform(merging, 'rebase')
505 editor = cmdutil.getcommiteditor(editform=editform,
505 editor = cmdutil.getcommiteditor(editform=editform,
506 **pycompat.strkwargs(opts))
506 **pycompat.strkwargs(opts))
507 if self.wctx.isinmemory():
507 if self.wctx.isinmemory():
508 newnode = concludememorynode(repo, rev, p1, p2,
508 newnode = concludememorynode(repo, rev, p1, p2,
509 wctx=self.wctx,
509 wctx=self.wctx,
510 extrafn=_makeextrafn(self.extrafns),
510 extrafn=_makeextrafn(self.extrafns),
511 editor=editor,
511 editor=editor,
512 keepbranches=self.keepbranchesf,
512 keepbranches=self.keepbranchesf,
513 date=self.date)
513 date=self.date)
514 mergemod.mergestate.clean(repo)
514 mergemod.mergestate.clean(repo)
515 else:
515 else:
516 newnode = concludenode(repo, rev, p1, p2,
516 newnode = concludenode(repo, rev, p1, p2,
517 extrafn=_makeextrafn(self.extrafns),
517 extrafn=_makeextrafn(self.extrafns),
518 editor=editor,
518 editor=editor,
519 keepbranches=self.keepbranchesf,
519 keepbranches=self.keepbranchesf,
520 date=self.date)
520 date=self.date)
521
521
522 if newnode is None:
522 if newnode is None:
523 # If it ended up being a no-op commit, then the normal
523 # If it ended up being a no-op commit, then the normal
524 # merge state clean-up path doesn't happen, so do it
524 # merge state clean-up path doesn't happen, so do it
525 # here. Fix issue5494
525 # here. Fix issue5494
526 mergemod.mergestate.clean(repo)
526 mergemod.mergestate.clean(repo)
527 else:
527 else:
528 # Skip commit if we are collapsing
528 # Skip commit if we are collapsing
529 if self.wctx.isinmemory():
529 if self.wctx.isinmemory():
530 self.wctx.setbase(repo[p1])
530 self.wctx.setbase(repo[p1])
531 else:
531 else:
532 repo.setparents(repo[p1].node())
532 repo.setparents(repo[p1].node())
533 newnode = None
533 newnode = None
534 # Update the state
534 # Update the state
535 if newnode is not None:
535 if newnode is not None:
536 self.state[rev] = repo[newnode].rev()
536 self.state[rev] = repo[newnode].rev()
537 ui.debug('rebased as %s\n' % short(newnode))
537 ui.debug('rebased as %s\n' % short(newnode))
538 else:
538 else:
539 if not self.collapsef:
539 if not self.collapsef:
540 ui.warn(_('note: rebase of %d:%s created no changes '
540 ui.warn(_('note: rebase of %d:%s created no changes '
541 'to commit\n') % (rev, ctx))
541 'to commit\n') % (rev, ctx))
542 self.skipped.add(rev)
542 self.skipped.add(rev)
543 self.state[rev] = p1
543 self.state[rev] = p1
544 - ui.debug('next revision set to %s\n' % p1)
544 + ui.debug('next revision set to %d\n' % p1)
545 else:
545 else:
546 ui.status(_('already rebased %s as %s\n') %
546 ui.status(_('already rebased %s as %s\n') %
547 (desc, repo[self.state[rev]]))
547 (desc, repo[self.state[rev]]))
548 return pos
548 return pos
549
549
550 def _finishrebase(self):
550 def _finishrebase(self):
551 repo, ui, opts = self.repo, self.ui, self.opts
551 repo, ui, opts = self.repo, self.ui, self.opts
552 fm = ui.formatter('rebase', opts)
552 fm = ui.formatter('rebase', opts)
553 fm.startitem()
553 fm.startitem()
554 if self.collapsef and not self.keepopen:
554 if self.collapsef and not self.keepopen:
555 p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
555 p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
556 self.state, self.skipped,
556 self.state, self.skipped,
557 self.obsoletenotrebased)
557 self.obsoletenotrebased)
558 editopt = opts.get('edit')
558 editopt = opts.get('edit')
559 editform = 'rebase.collapse'
559 editform = 'rebase.collapse'
560 if self.collapsemsg:
560 if self.collapsemsg:
561 commitmsg = self.collapsemsg
561 commitmsg = self.collapsemsg
562 else:
562 else:
563 commitmsg = 'Collapsed revision'
563 commitmsg = 'Collapsed revision'
564 for rebased in sorted(self.state):
564 for rebased in sorted(self.state):
565 if rebased not in self.skipped:
565 if rebased not in self.skipped:
566 commitmsg += '\n* %s' % repo[rebased].description()
566 commitmsg += '\n* %s' % repo[rebased].description()
567 editopt = True
567 editopt = True
568 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
568 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
569 revtoreuse = max(self.state)
569 revtoreuse = max(self.state)
570
570
571 dsguard = None
571 dsguard = None
572 if self.inmemory:
572 if self.inmemory:
573 newnode = concludememorynode(repo, revtoreuse, p1,
573 newnode = concludememorynode(repo, revtoreuse, p1,
574 self.external,
574 self.external,
575 commitmsg=commitmsg,
575 commitmsg=commitmsg,
576 extrafn=_makeextrafn(self.extrafns),
576 extrafn=_makeextrafn(self.extrafns),
577 editor=editor,
577 editor=editor,
578 keepbranches=self.keepbranchesf,
578 keepbranches=self.keepbranchesf,
579 date=self.date, wctx=self.wctx)
579 date=self.date, wctx=self.wctx)
580 else:
580 else:
581 if ui.configbool('rebase', 'singletransaction'):
581 if ui.configbool('rebase', 'singletransaction'):
582 dsguard = dirstateguard.dirstateguard(repo, 'rebase')
582 dsguard = dirstateguard.dirstateguard(repo, 'rebase')
583 with util.acceptintervention(dsguard):
583 with util.acceptintervention(dsguard):
584 newnode = concludenode(repo, revtoreuse, p1, self.external,
584 newnode = concludenode(repo, revtoreuse, p1, self.external,
585 commitmsg=commitmsg,
585 commitmsg=commitmsg,
586 extrafn=_makeextrafn(self.extrafns),
586 extrafn=_makeextrafn(self.extrafns),
587 editor=editor,
587 editor=editor,
588 keepbranches=self.keepbranchesf,
588 keepbranches=self.keepbranchesf,
589 date=self.date)
589 date=self.date)
590 if newnode is not None:
590 if newnode is not None:
591 newrev = repo[newnode].rev()
591 newrev = repo[newnode].rev()
592 for oldrev in self.state.iterkeys():
592 for oldrev in self.state.iterkeys():
593 self.state[oldrev] = newrev
593 self.state[oldrev] = newrev
594
594
595 if 'qtip' in repo.tags():
595 if 'qtip' in repo.tags():
596 updatemq(repo, self.state, self.skipped, **opts)
596 updatemq(repo, self.state, self.skipped, **opts)
597
597
598 # restore original working directory
598 # restore original working directory
599 # (we do this before stripping)
599 # (we do this before stripping)
600 newwd = self.state.get(self.originalwd, self.originalwd)
600 newwd = self.state.get(self.originalwd, self.originalwd)
601 if newwd < 0:
601 if newwd < 0:
602 # original directory is a parent of rebase set root or ignored
602 # original directory is a parent of rebase set root or ignored
603 newwd = self.originalwd
603 newwd = self.originalwd
604 if (newwd not in [c.rev() for c in repo[None].parents()] and
604 if (newwd not in [c.rev() for c in repo[None].parents()] and
605 not self.inmemory):
605 not self.inmemory):
606 ui.note(_("update back to initial working directory parent\n"))
606 ui.note(_("update back to initial working directory parent\n"))
607 hg.updaterepo(repo, newwd, False)
607 hg.updaterepo(repo, newwd, False)
608
608
609 collapsedas = None
609 collapsedas = None
610 if not self.keepf:
610 if not self.keepf:
611 if self.collapsef:
611 if self.collapsef:
612 collapsedas = newnode
612 collapsedas = newnode
613 clearrebased(ui, repo, self.destmap, self.state, self.skipped,
613 clearrebased(ui, repo, self.destmap, self.state, self.skipped,
614 collapsedas, self.keepf, fm=fm)
614 collapsedas, self.keepf, fm=fm)
615
615
616 clearstatus(repo)
616 clearstatus(repo)
617 clearcollapsemsg(repo)
617 clearcollapsemsg(repo)
618
618
619 ui.note(_("rebase completed\n"))
619 ui.note(_("rebase completed\n"))
620 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
620 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
621 if self.skipped:
621 if self.skipped:
622 skippedlen = len(self.skipped)
622 skippedlen = len(self.skipped)
623 ui.note(_("%d revisions have been skipped\n") % skippedlen)
623 ui.note(_("%d revisions have been skipped\n") % skippedlen)
624 fm.end()
624 fm.end()
625
625
626 if (self.activebookmark and self.activebookmark in repo._bookmarks and
626 if (self.activebookmark and self.activebookmark in repo._bookmarks and
627 repo['.'].node() == repo._bookmarks[self.activebookmark]):
627 repo['.'].node() == repo._bookmarks[self.activebookmark]):
628 bookmarks.activate(repo, self.activebookmark)
628 bookmarks.activate(repo, self.activebookmark)
629
629
630 @command('rebase',
630 @command('rebase',
631 [('s', 'source', '',
631 [('s', 'source', '',
632 _('rebase the specified changeset and descendants'), _('REV')),
632 _('rebase the specified changeset and descendants'), _('REV')),
633 ('b', 'base', '',
633 ('b', 'base', '',
634 _('rebase everything from branching point of specified changeset'),
634 _('rebase everything from branching point of specified changeset'),
635 _('REV')),
635 _('REV')),
636 ('r', 'rev', [],
636 ('r', 'rev', [],
637 _('rebase these revisions'),
637 _('rebase these revisions'),
638 _('REV')),
638 _('REV')),
639 ('d', 'dest', '',
639 ('d', 'dest', '',
640 _('rebase onto the specified changeset'), _('REV')),
640 _('rebase onto the specified changeset'), _('REV')),
641 ('', 'collapse', False, _('collapse the rebased changesets')),
641 ('', 'collapse', False, _('collapse the rebased changesets')),
642 ('m', 'message', '',
642 ('m', 'message', '',
643 _('use text as collapse commit message'), _('TEXT')),
643 _('use text as collapse commit message'), _('TEXT')),
644 ('e', 'edit', False, _('invoke editor on commit messages')),
644 ('e', 'edit', False, _('invoke editor on commit messages')),
645 ('l', 'logfile', '',
645 ('l', 'logfile', '',
646 _('read collapse commit message from file'), _('FILE')),
646 _('read collapse commit message from file'), _('FILE')),
647 ('k', 'keep', False, _('keep original changesets')),
647 ('k', 'keep', False, _('keep original changesets')),
648 ('', 'keepbranches', False, _('keep original branch names')),
648 ('', 'keepbranches', False, _('keep original branch names')),
649 ('D', 'detach', False, _('(DEPRECATED)')),
649 ('D', 'detach', False, _('(DEPRECATED)')),
650 ('i', 'interactive', False, _('(DEPRECATED)')),
650 ('i', 'interactive', False, _('(DEPRECATED)')),
651 ('t', 'tool', '', _('specify merge tool')),
651 ('t', 'tool', '', _('specify merge tool')),
652 ('c', 'continue', False, _('continue an interrupted rebase')),
652 ('c', 'continue', False, _('continue an interrupted rebase')),
653 ('a', 'abort', False, _('abort an interrupted rebase'))] +
653 ('a', 'abort', False, _('abort an interrupted rebase'))] +
654 cmdutil.formatteropts,
654 cmdutil.formatteropts,
655 _('[-s REV | -b REV] [-d REV] [OPTION]'))
655 _('[-s REV | -b REV] [-d REV] [OPTION]'))
656 def rebase(ui, repo, **opts):
656 def rebase(ui, repo, **opts):
657 """move changeset (and descendants) to a different branch
657 """move changeset (and descendants) to a different branch
658
658
659 Rebase uses repeated merging to graft changesets from one part of
659 Rebase uses repeated merging to graft changesets from one part of
660 history (the source) onto another (the destination). This can be
660 history (the source) onto another (the destination). This can be
661 useful for linearizing *local* changes relative to a master
661 useful for linearizing *local* changes relative to a master
662 development tree.
662 development tree.
663
663
664 Published commits cannot be rebased (see :hg:`help phases`).
664 Published commits cannot be rebased (see :hg:`help phases`).
665 To copy commits, see :hg:`help graft`.
665 To copy commits, see :hg:`help graft`.
666
666
667 If you don't specify a destination changeset (``-d/--dest``), rebase
667 If you don't specify a destination changeset (``-d/--dest``), rebase
668 will use the same logic as :hg:`merge` to pick a destination. If
668 will use the same logic as :hg:`merge` to pick a destination. If
669 the current branch contains exactly one other head, that head is
669 the current branch contains exactly one other head, that head is
670 used as the destination by default. Otherwise, an explicit
670 used as the destination by default. Otherwise, an explicit
671 destination revision must be provided. (The destination changeset
671 destination revision must be provided. (The destination changeset
672 is not modified by rebasing, but new changesets are added as its
672 is not modified by rebasing, but new changesets are added as its
673 descendants.)
673 descendants.)
674
674
675 Here are the ways to select changesets:
675 Here are the ways to select changesets:
676
676
677 1. Explicitly select them using ``--rev``.
677 1. Explicitly select them using ``--rev``.
678
678
679 2. Use ``--source`` to select a root changeset and include all of its
679 2. Use ``--source`` to select a root changeset and include all of its
680 descendants.
680 descendants.
681
681
682 3. Use ``--base`` to select a changeset; rebase will find ancestors
682 3. Use ``--base`` to select a changeset; rebase will find ancestors
683 and their descendants which are not also ancestors of the destination.
683 and their descendants which are not also ancestors of the destination.
684
684
685 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
685 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
686 rebase will use ``--base .`` as above.
686 rebase will use ``--base .`` as above.
687
687
688 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
688 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
689 can be used in ``--dest``. The destination will be calculated per source
689 can be used in ``--dest``. The destination will be calculated per source
690 revision, with ``SRC`` substituted by that single source revision and
690 revision, with ``SRC`` substituted by that single source revision and
691 ``ALLSRC`` substituted by all source revisions.
691 ``ALLSRC`` substituted by all source revisions.
692
692
693 Rebase will destroy original changesets unless you use ``--keep``.
693 Rebase will destroy original changesets unless you use ``--keep``.
694 It will also move your bookmarks (even if you do).
694 It will also move your bookmarks (even if you do).
695
695
696 Some changesets may be dropped if they do not contribute changes
696 Some changesets may be dropped if they do not contribute changes
697 (e.g. merges from the destination branch).
697 (e.g. merges from the destination branch).
698
698
699 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
699 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
700 a named branch with two heads. You will need to explicitly specify source
700 a named branch with two heads. You will need to explicitly specify source
701 and/or destination.
701 and/or destination.
702
702
703 If you need to use a tool to automate merge/conflict decisions, you
703 If you need to use a tool to automate merge/conflict decisions, you
704 can specify one with ``--tool``, see :hg:`help merge-tools`.
704 can specify one with ``--tool``, see :hg:`help merge-tools`.
705 As a caveat: the tool will not be used to mediate when a file was
705 As a caveat: the tool will not be used to mediate when a file was
706 deleted; there is no hook presently available for this.
706 deleted; there is no hook presently available for this.
707
707
708 If a rebase is interrupted to manually resolve a conflict, it can be
708 If a rebase is interrupted to manually resolve a conflict, it can be
709 continued with --continue/-c or aborted with --abort/-a.
709 continued with --continue/-c or aborted with --abort/-a.
710
710
711 .. container:: verbose
711 .. container:: verbose
712
712
713 Examples:
713 Examples:
714
714
715 - move "local changes" (current commit back to branching point)
715 - move "local changes" (current commit back to branching point)
716 to the current branch tip after a pull::
716 to the current branch tip after a pull::
717
717
718 hg rebase
718 hg rebase
719
719
720 - move a single changeset to the stable branch::
720 - move a single changeset to the stable branch::
721
721
722 hg rebase -r 5f493448 -d stable
722 hg rebase -r 5f493448 -d stable
723
723
724 - splice a commit and all its descendants onto another part of history::
724 - splice a commit and all its descendants onto another part of history::
725
725
726 hg rebase --source c0c3 --dest 4cf9
726 hg rebase --source c0c3 --dest 4cf9
727
727
728 - rebase everything on a branch marked by a bookmark onto the
728 - rebase everything on a branch marked by a bookmark onto the
729 default branch::
729 default branch::
730
730
731 hg rebase --base myfeature --dest default
731 hg rebase --base myfeature --dest default
732
732
733 - collapse a sequence of changes into a single commit::
733 - collapse a sequence of changes into a single commit::
734
734
735 hg rebase --collapse -r 1520:1525 -d .
735 hg rebase --collapse -r 1520:1525 -d .
736
736
737 - move a named branch while preserving its name::
737 - move a named branch while preserving its name::
738
738
739 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
739 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
740
740
741 - stabilize orphaned changesets so history looks linear::
741 - stabilize orphaned changesets so history looks linear::
742
742
743 hg rebase -r 'orphan()-obsolete()'\
743 hg rebase -r 'orphan()-obsolete()'\
744 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
744 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
745 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
745 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
746
746
747 Configuration Options:
747 Configuration Options:
748
748
749 You can make rebase require a destination if you set the following config
749 You can make rebase require a destination if you set the following config
750 option::
750 option::
751
751
752 [commands]
752 [commands]
753 rebase.requiredest = True
753 rebase.requiredest = True
754
754
755 By default, rebase will close the transaction after each commit. For
755 By default, rebase will close the transaction after each commit. For
756 performance purposes, you can configure rebase to use a single transaction
756 performance purposes, you can configure rebase to use a single transaction
757 across the entire rebase. WARNING: This setting introduces a significant
757 across the entire rebase. WARNING: This setting introduces a significant
758 risk of losing the work you've done in a rebase if the rebase aborts
758 risk of losing the work you've done in a rebase if the rebase aborts
759 unexpectedly::
759 unexpectedly::
760
760
761 [rebase]
761 [rebase]
762 singletransaction = True
762 singletransaction = True
763
763
764 By default, rebase writes to the working copy, but you can configure it to
764 By default, rebase writes to the working copy, but you can configure it to
765 run in-memory for better performance, and to allow it to run if the
765 run in-memory for better performance, and to allow it to run if the
766 working copy is dirty::
766 working copy is dirty::
767
767
768 [rebase]
768 [rebase]
769 experimental.inmemory = True
769 experimental.inmemory = True
770
770
771 Return Values:
771 Return Values:
772
772
773 Returns 0 on success, 1 if nothing to rebase or there are
773 Returns 0 on success, 1 if nothing to rebase or there are
774 unresolved conflicts.
774 unresolved conflicts.
775
775
776 """
776 """
777 inmemory = ui.configbool('rebase', 'experimental.inmemory')
777 inmemory = ui.configbool('rebase', 'experimental.inmemory')
778 if (opts.get('continue') or opts.get('abort') or
778 if (opts.get('continue') or opts.get('abort') or
779 repo.currenttransaction() is not None):
779 repo.currenttransaction() is not None):
780 # in-memory rebase is not compatible with resuming rebases.
780 # in-memory rebase is not compatible with resuming rebases.
781 # (Or if it is run within a transaction, since the restart logic can
781 # (Or if it is run within a transaction, since the restart logic can
782 # fail the entire transaction.)
782 # fail the entire transaction.)
783 inmemory = False
783 inmemory = False
784
784
785 if inmemory:
785 if inmemory:
786 try:
786 try:
787 # in-memory merge doesn't support conflicts, so if we hit any, abort
787 # in-memory merge doesn't support conflicts, so if we hit any, abort
788 # and re-run as an on-disk merge.
788 # and re-run as an on-disk merge.
789 return _origrebase(ui, repo, inmemory=inmemory, **opts)
789 return _origrebase(ui, repo, inmemory=inmemory, **opts)
790 except error.InMemoryMergeConflictsError:
790 except error.InMemoryMergeConflictsError:
            ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
                      ' merge\n'))
            _origrebase(ui, repo, **{'abort': True})
            return _origrebase(ui, repo, inmemory=False, **opts)
    else:
        return _origrebase(ui, repo, **opts)

def _origrebase(ui, repo, inmemory=False, **opts):
    opts = pycompat.byteskwargs(opts)
    rbsrt = rebaseruntime(repo, ui, inmemory, opts)

    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        contf = opts.get('continue')
        abortf = opts.get('abort')
        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if contf:
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(abortf)
            if retcode is not None:
                return retcode
        else:
            destmap = _definedestmap(ui, repo, rbsrt, destf, srcf, basef, revf,
                                     destspace=destspace)
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode

        tr = None
        dsguard = None

        singletr = ui.configbool('rebase', 'singletransaction')
        if singletr:
            tr = repo.transaction('rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
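        #
        # For illustration only (SRC and DEST are hypothetical revisions):
        # the single-transaction mode is opt-in and can also be enabled for
        # a single run with
        #     hg rebase -s SRC -d DEST --config rebase.singletransaction=yes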
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)

            rbsrt._finishrebase()

def _definedestmap(ui, repo, rbsrt, destf=None, srcf=None, basef=None,
                   revf=None, destspace=None):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase`;
    # see issue5214 for details
    if srcf and basef:
        raise error.Abort(_('cannot specify both a source and a base'))
    if revf and basef:
        raise error.Abort(_('cannot specify both a revision and a base'))
    if revf and srcf:
        raise error.Abort(_('cannot specify both a revision and a source'))

    if not rbsrt.inmemory:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if ui.configbool('commands', 'rebase.requiredest') and not destf:
        raise error.Abort(_('you must specify a destination'),
                          hint=_('use: hg rebase -d REV'))

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_('empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs('(%ld)::', src)
        assert rebaseset
    else:
        base = scmutil.revrange(repo, [basef or '.'])
        if not base:
            ui.status(_('empty "base" revision set - '
                        "can't compute rebase set\n"))
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = str(dest)

        roots = [] # selected children of branching points
        bpbase = {} # {branchingpoint: [origbase]}
        for b in base: # group bases by branching points
            bp = repo.revs('ancestor(%d, %d)', b, dest).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be to abort with a "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in bpbase.iteritems(): # calculate roots
            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs('%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(_('nothing to rebase - %s is both "base"'
                                ' and destination\n') % dest)
                else:
                    ui.status(_('nothing to rebase - working directory '
                                'parent is also destination\n'))
            elif not repo.revs('%ld - ::%d', base, dest):
                if basef:
                    ui.status(_('nothing to rebase - "base" %s is '
                                'already an ancestor of destination '
                                '%s\n') %
                              ('+'.join(str(repo[r]) for r in base),
                               dest))
                else:
                    ui.status(_('nothing to rebase - working '
                                'directory parent is already an '
                                'ancestor of destination %s\n') % dest)
            else: # can it happen?
                ui.status(_('nothing to rebase from %s to %s\n') %
                          ('+'.join(str(repo[r]) for r in base), dest))
            return None
    # If rebasing the working copy parent, force in-memory merge to be off.
    #
    # This is because the extra work of checking out the newly rebased commit
    # outweighs the benefits of rebasing in-memory, and executing an extra
    # update command adds a bit of overhead, so better to just do it on disk. In
    # all other cases leave it on.
    #
    # Note that there are cases where this isn't true -- e.g., rebasing large
    # stacks that include the WCP. However, I'm not yet sure where the cutoff
    # is.
    rebasingwcp = repo['.'].rev() in rebaseset
    ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp)
    if rbsrt.inmemory and rebasingwcp:
        rbsrt.inmemory = False
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = str(dest)

    allsrc = revsetlang.formatspec('%ld', rebaseset)
    alias = {'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias['SRC'] = revsetlang.formatspec('%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_('skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(_('rebase destination for %s is not '
                                        'unique') % repo[r])

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}

    if not destmap:
        ui.status(_('nothing to rebase - empty destination\n'))
        return None

    return destmap
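    # A small illustration of the returned mapping (hypothetical revision
    # numbers): with a single destination, every source rev maps to the same
    # dest, e.g. {10: 42, 11: 42, 12: 42}; with a revset destination that
    # mentions SRC, each source rev can resolve to its own destination,
    # e.g. {10: 42, 11: 43, 12: 43}.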

def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if (p.rev() not in state
                and p.rev() not in destancestors):
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(_('unable to collapse on top of %s, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join(str(p) for p in sorted(parents))))
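    # Worked example (hypothetical revs): when collapsing {5, 6, 7} on top of
    # destancestors, a second parent of 6 that is neither in the collapsed set
    # nor an ancestor of the destination becomes the collapsed commit's second
    # parent; if two different such external parents exist, the Abort above is
    # raised because the collapsed node can only keep one of them.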

def concludememorynode(repo, rev, p1, p2, wctx=None,
                       commitmsg=None, editor=None, extrafn=None,
                       keepbranches=False, date=None):
    '''Commit the memory changes with parents p1 and p2. Reuse commit info from
    rev but also store useful information in extra.
    Return node of committed revision.'''
    ctx = repo[rev]
    if commitmsg is None:
        commitmsg = ctx.description()
    keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
    extra = {'rebase_source': ctx.hex()}
    if extrafn:
        extrafn(ctx, extra)

    destphase = max(ctx.phase(), phases.draft)
    overrides = {('phases', 'new-commit'): destphase}
    with repo.ui.configoverride(overrides, 'rebase'):
        if keepbranch:
            repo.ui.setconfig('ui', 'allowemptycommit', True)
        # Replicates the empty check in ``repo.commit``.
        if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
            return None

        if date is None:
            date = ctx.date()

        # By convention, ``extra['branch']`` (set by extrafn) clobbers
        # ``branch`` (used when passing ``--keepbranches``).
        branch = repo[p1].branch()
        if 'branch' in extra:
            branch = extra['branch']

        memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date,
            extra=extra, user=ctx.user(), branch=branch, editor=editor)
        commitres = repo.commitctx(memctx)
        wctx.clean() # Might be reused
        return commitres

def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
                 keepbranches=False, date=None):
    '''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
    but also store useful information in extra.
    Return node of committed revision.'''
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool('rebase', 'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, 'rebase')
    with dsguard:
        repo.setparents(repo[p1].node(), repo[p2].node())
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
        extra = {'rebase_source': ctx.hex()}
        if extrafn:
            extrafn(ctx, extra)

        destphase = max(ctx.phase(), phases.draft)
        overrides = {('phases', 'new-commit'): destphase}
        with repo.ui.configoverride(overrides, 'rebase'):
            if keepbranch:
                repo.ui.setconfig('ui', 'allowemptycommit', True)
            # Commit might fail if unresolved files exist
            if date is None:
                date = ctx.date()
            newnode = repo.commit(text=commitmsg, user=ctx.user(),
                                  date=date, extra=extra, editor=editor)

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode

def rebasenode(repo, rev, p1, base, state, collapse, dest, wctx):
    'Rebase a single revision rev on top of p1 using base as merge ancestor'
    # Merge phase
    # Update to destination and merge it with local
    if wctx.isinmemory():
        wctx.setbase(repo[p1])
    else:
        if repo['.'].rev() != p1:
            repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
            mergemod.update(repo, p1, False, True)
        else:
            repo.ui.debug(" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(repo, rev, True, True, base, collapse,
                            labels=['dest', 'source'], wc=wctx)
    if collapse:
        copies.duplicatecopies(repo, wctx, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
    return stats

def adjustdest(repo, rev, destmap, state, skipped):
    """adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example, with merge changesets: for rebase -r C+G+H -d K, rebase
    will first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

      C'       After rebasing C, when considering B's destination, use C'
      |        instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [s for s, d in state.items()
              if d > 0 and destmap[s] == dest and s not in skipped]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    'rev %d should be rebased already at this time' % dest)
        result.append(adjusted)
    return result

def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool('experimental',
                                 'evolution.allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (str(repo[r])
                     for r in divergencebasecandidates)
        msg = _("this rebase will cause "
                "divergences from: %s")
        h = _("to force the rebase please set "
              "experimental.evolution.allowdivergence=True")
        raise error.Abort(msg % (",".join(divhashes),), hint=h)

def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    assert unfi.filtername is None
    nodemap = unfi.changelog.nodemap
    for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        if s in nodemap:
            yield nodemap[s]

def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    The destination specified by "dest" cannot always be used directly because
    a previous rebase result could affect the destination. For example,

      D E    rebase -r C+D+E -d B
      |/     C will be rebased to C'
    B C      D's new destination will be C' instead of B
    |/       E's new destination will be C' instead of B
    A

    The new parents of a merge are slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    def isancestor(a, b):
        # take revision numbers instead of nodes
        if a == b:
            return True
        elif a > b:
            return False
        return cl.isancestor(cl.node(a), cl.node(b))

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev) # old parents
    newps = [nullrev, nullrev] # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps) # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limitation because "--dest"
        # only accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendant of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries not to rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # "bases" only record "special" merge bases that cannot be
            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
            # For example:
            #
            #   B'  # rebase -s B -d D, when B was rebased to B'. dest for C
            #   | C # is B', but merge base for C is B, instead of
            #   D | # changelog.ancestor(C, B') == A. If changelog DAG and
            #   | B # "state" edges are merged (so there will be an edge from
            #   |/  # B to B'), the merge base is still ancestor(C, B') in
            #   A   # the merged graph.
            #
            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
            # which uses "virtual null merge" to explain this situation.
            if isancestor(p, np):
                bases[i] = nullrev

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x): # CASE-1
                    np = nullrev
                elif isancestor(x, np): # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendant of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(_('cannot rebase %d:%s without '
                                'moving at least one of its parents')
                              % (rev, repo[rev]))

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_('source is ancestor of destination'))

    # "rebasenode" updates to new p1, use the corresponding merge base.
    if bases[0] != nullrev:
        base = bases[0]
    else:
        base = None

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in bases if b != nullrev) > 1:
        unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            siderevs = list(repo.revs('((%ld-%d) %% (%d+%d))',
                                      bases, base, base, dest))

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [r for r, d in state.items()
                             if d > 0 and r not in obsskipped]
                merges = [r for r in rebaseset
                          if cl.parentrevs(r)[1] != nullrev]
                unwanted[i] = list(repo.revs('%ld - (::%ld) - %ld',
                                             siderevs, merges, rebaseset))

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min((len(revs), i)
                   for i, revs in enumerate(unwanted) if revs is not None)
        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine to not swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(' or ').join(
                (', '.join('%d:%s' % (r, repo[r]) for r in revs)
                 for revs in unwanted if revs is not None))
            raise error.Abort(
                _('rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc))

    repo.ui.debug(" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base

def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, 'rb')):
        if line.startswith('diff --git'):
            return True
    return False

def updatemq(repo, state, skipped, **opts):
    'Update rebased mq patches - finalize and then import them'
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %s:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=[str(state[rev])])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()

def storecollapsemsg(repo, collapsemsg):
    'Store the collapse message to allow recovery'
    collapsemsg = collapsemsg or ''
    f = repo.vfs("last-message.txt", "w")
    f.write("%s\n" % collapsemsg)
    f.close()

def clearcollapsemsg(repo):
    'Remove collapse message file'
    repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)

def restorecollapsemsg(repo, isabort):
    'Restore previously stored collapse message'
    try:
        f = repo.vfs("last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = ''
        else:
            raise error.Abort(_('missing .hg/last-message.txt for rebase'))
    return collapsemsg

def clearstatus(repo):
    'Remove the status files'
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator('rebasestate')
    repo.vfs.unlinkpath("rebasestate", ignoremissing=True)

def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    parents = [p.rev() for p in repo[None].parents()]

    # Are we in a merge state at all?
    if len(parents) < 2:
        return False

    # We should be standing on the first as-of-yet unrebased commit.
    firstunrebased = min([old for old, new in state.iteritems()
                          if new == nullrev])
    if firstunrebased in parents:
        return True

    return False

def abort(repo, originalwd, destmap, state, activebookmark=None):
    '''Restore the repository to its original state. Additional args:

    activebookmark: the name of the bookmark that should be active after the
    restore'''

    try:
        # If the first commits in the rebased set get skipped during the rebase,
        # their values within the state mapping will be the dest rev id. The
        # dstates list must not contain the dest rev (issue4896)
        dstates = [s for r, s in state.items() if s >= 0 and s != destmap[r]]
        immutable = [d for d in dstates if not repo[d].mutable()]
        cleanup = True
        if immutable:
            repo.ui.warn(_("warning: can't clean up public changesets %s\n")
                         % ', '.join(str(repo[r]) for r in immutable),
                         hint=_("see 'hg help phases' for details"))
            cleanup = False

        descendants = set()
        if dstates:
            descendants = set(repo.changelog.descendants(dstates))
        if descendants - set(dstates):
            repo.ui.warn(_("warning: new changesets detected on destination "
                           "branch, can't strip\n"))
            cleanup = False

        if cleanup:
            shouldupdate = False
            rebased = [s for r, s in state.items()
                       if s >= 0 and s != destmap[r]]
            if rebased:
                strippoints = [
                        c.node() for c in repo.set('roots(%ld)', rebased)]

            updateifonnodes = set(rebased)
            updateifonnodes.update(destmap.values())
            updateifonnodes.add(originalwd)
            shouldupdate = repo['.'].rev() in updateifonnodes

            # Update away from the rebase if necessary
            if shouldupdate or needupdate(repo, state):
                mergemod.update(repo, originalwd, False, True)

            # Strip from the first rebased revision
            if rebased:
                # no backup of rebased cset versions needed
                repair.strip(repo.ui, repo, strippoints)

        if activebookmark and activebookmark in repo._bookmarks:
            bookmarks.activate(repo, activebookmark)

    finally:
        clearstatus(repo)
        clearcollapsemsg(repo)
        repo.ui.warn(_('rebase aborted\n'))
    return 0
1598
1598
1599 def sortsource(destmap):
1599 def sortsource(destmap):
1600 """yield source revisions in an order that we only rebase things once
1600 """yield source revisions in an order that we only rebase things once
1601
1601
1602 If source and destination overlaps, we should filter out revisions
1602 If source and destination overlaps, we should filter out revisions
1603 depending on other revisions which hasn't been rebased yet.
1603 depending on other revisions which hasn't been rebased yet.
1604
1604
1605 Yield a sorted list of revisions each time.
1605 Yield a sorted list of revisions each time.
1606
1606
1607 For example, when rebasing A onto B and B onto C, this function yields [B],
1607 For example, when rebasing A onto B and B onto C, this function yields [B],
1608 then [A], indicating B needs to be rebased first.
1608 then [A], indicating B needs to be rebased first.
1609
1609
1610 Raise if there is a cycle so the rebase is impossible.
1610 Raise if there is a cycle so the rebase is impossible.
1611 """
1611 """
1612 srcset = set(destmap)
1612 srcset = set(destmap)
1613 while srcset:
1613 while srcset:
1614 srclist = sorted(srcset)
1614 srclist = sorted(srcset)
1615 result = []
1615 result = []
1616 for r in srclist:
1616 for r in srclist:
1617 if destmap[r] not in srcset:
1617 if destmap[r] not in srcset:
1618 result.append(r)
1618 result.append(r)
1619 if not result:
1619 if not result:
1620 raise error.Abort(_('source and destination form a cycle'))
1620 raise error.Abort(_('source and destination form a cycle'))
1621 srcset -= set(result)
1621 srcset -= set(result)
1622 yield result
1622 yield result
1623
1623
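# A minimal, self-contained sketch of the batching order sortsource() above
# produces, using plain ints as revision numbers. The helper name
# 'toposort_batches' is hypothetical and not part of Mercurial.
def toposort_batches(destmap):
    srcset = set(destmap)
    while srcset:
        batch = sorted(r for r in srcset if destmap[r] not in srcset)
        if not batch:
            raise ValueError('source and destination form a cycle')
        srcset.difference_update(batch)
        yield batch

# rebasing 1 onto 2 and 2 onto 3: rev 2 must move first, then rev 1
assert list(toposort_batches({1: 2, 2: 3})) == [[2], [1]]
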
1624 def buildstate(repo, destmap, collapse):
1624 def buildstate(repo, destmap, collapse):
1625 '''Define which revisions are going to be rebased and where
1625 '''Define which revisions are going to be rebased and where
1626
1626
1627 repo: repo
1627 repo: repo
1628 destmap: {srcrev: destrev}
1628 destmap: {srcrev: destrev}
1629 '''
1629 '''
1630 rebaseset = destmap.keys()
1630 rebaseset = destmap.keys()
1631 originalwd = repo['.'].rev()
1631 originalwd = repo['.'].rev()
1632
1632
1633 # This check isn't strictly necessary, since mq detects commits over an
1633 # This check isn't strictly necessary, since mq detects commits over an
1634 # applied patch. But it prevents messing up the working directory when
1634 # applied patch. But it prevents messing up the working directory when
1635 # a partially completed rebase is blocked by mq.
1635 # a partially completed rebase is blocked by mq.
1636 if 'qtip' in repo.tags():
1636 if 'qtip' in repo.tags():
1637 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1637 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1638 if set(destmap.values()) & mqapplied:
1638 if set(destmap.values()) & mqapplied:
1639 raise error.Abort(_('cannot rebase onto an applied mq patch'))
1639 raise error.Abort(_('cannot rebase onto an applied mq patch'))
1640
1640
1641 # Get "cycle" error early by exhausting the generator.
1641 # Get "cycle" error early by exhausting the generator.
1642 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1642 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1643 if not sortedsrc:
1643 if not sortedsrc:
1644 raise error.Abort(_('no matching revisions'))
1644 raise error.Abort(_('no matching revisions'))
1645
1645
1646 # Only check the first batch of revisions to rebase, i.e. those not depending
1646 # Only check the first batch of revisions to rebase, i.e. those not depending
1647 # on other revisions in the rebase set. This means the "source is ancestor of
1647 # on other revisions in the rebase set. This means the "source is ancestor of
1648 # destination" check for the second (and following) batches is not done here.
1648 # destination" check for the second (and following) batches is not done here.
1649 # We rely on "defineparents" to do that check.
1649 # We rely on "defineparents" to do that check.
1650 roots = list(repo.set('roots(%ld)', sortedsrc[0]))
1650 roots = list(repo.set('roots(%ld)', sortedsrc[0]))
1651 if not roots:
1651 if not roots:
1652 raise error.Abort(_('no matching revisions'))
1652 raise error.Abort(_('no matching revisions'))
1653 roots.sort()
1653 roots.sort()
1654 state = dict.fromkeys(rebaseset, revtodo)
1654 state = dict.fromkeys(rebaseset, revtodo)
1655 emptyrebase = (len(sortedsrc) == 1)
1655 emptyrebase = (len(sortedsrc) == 1)
1656 for root in roots:
1656 for root in roots:
1657 dest = repo[destmap[root.rev()]]
1657 dest = repo[destmap[root.rev()]]
1658 commonbase = root.ancestor(dest)
1658 commonbase = root.ancestor(dest)
1659 if commonbase == root:
1659 if commonbase == root:
1660 raise error.Abort(_('source is ancestor of destination'))
1660 raise error.Abort(_('source is ancestor of destination'))
1661 if commonbase == dest:
1661 if commonbase == dest:
1662 wctx = repo[None]
1662 wctx = repo[None]
1663 if dest == wctx.p1():
1663 if dest == wctx.p1():
1664 # when rebasing to '.', it will use the current wd branch name
1664 # when rebasing to '.', it will use the current wd branch name
1665 samebranch = root.branch() == wctx.branch()
1665 samebranch = root.branch() == wctx.branch()
1666 else:
1666 else:
1667 samebranch = root.branch() == dest.branch()
1667 samebranch = root.branch() == dest.branch()
1668 if not collapse and samebranch and dest in root.parents():
1668 if not collapse and samebranch and dest in root.parents():
1669 # mark the revision as done by setting its new revision
1669 # mark the revision as done by setting its new revision
1670 # equal to its old (current) revision
1670 # equal to its old (current) revision
1671 state[root.rev()] = root.rev()
1671 state[root.rev()] = root.rev()
1672 repo.ui.debug('source is a child of destination\n')
1672 repo.ui.debug('source is a child of destination\n')
1673 continue
1673 continue
1674
1674
1675 emptyrebase = False
1675 emptyrebase = False
1676 repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
1676 repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
1677 if emptyrebase:
1677 if emptyrebase:
1678 return None
1678 return None
1679 for rev in sorted(state):
1679 for rev in sorted(state):
1680 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1680 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1681 # if all parents of this revision are done, then so is this revision
1681 # if all parents of this revision are done, then so is this revision
1682 if parents and all((state.get(p) == p for p in parents)):
1682 if parents and all((state.get(p) == p for p in parents)):
1683 state[rev] = rev
1683 state[rev] = rev
1684 return originalwd, destmap, state
1684 return originalwd, destmap, state
1685
1685
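# A small stand-alone sketch of the final loop in buildstate() above: a
# revision whose parents are all already mapped to themselves is itself in
# place, so it is marked done by mapping it to itself too. 'parentsof' and
# 'markalreadydone' are hypothetical names; revtodo is -1 as in this module.
def markalreadydone(state, parentsof):
    for rev in sorted(state):
        parents = parentsof.get(rev, ())
        if parents and all(state.get(p) == p for p in parents):
            state[rev] = rev
    return state

# rev 2's only parent (rev 1) is already done, so rev 2 is marked done as well
assert markalreadydone({1: 1, 2: -1}, {2: [1]}) == {1: 1, 2: 2}
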
1686 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
1686 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
1687 keepf=False, fm=None):
1687 keepf=False, fm=None):
1688 """dispose of rebased revision at the end of the rebase
1688 """dispose of rebased revision at the end of the rebase
1689
1689
1690 If `collapsedas` is not None, the rebase was a collapse whose result is the
1690 If `collapsedas` is not None, the rebase was a collapse whose result is the
1691 `collapsedas` node.
1691 `collapsedas` node.
1692
1692
1693 If `keepf` is True, the rebase has --keep set and no nodes should be
1693 If `keepf` is True, the rebase has --keep set and no nodes should be
1694 removed (but bookmarks still need to be moved).
1694 removed (but bookmarks still need to be moved).
1695 """
1695 """
1696 tonode = repo.changelog.node
1696 tonode = repo.changelog.node
1697 replacements = {}
1697 replacements = {}
1698 moves = {}
1698 moves = {}
1699 for rev, newrev in sorted(state.items()):
1699 for rev, newrev in sorted(state.items()):
1700 if newrev >= 0 and newrev != rev:
1700 if newrev >= 0 and newrev != rev:
1701 oldnode = tonode(rev)
1701 oldnode = tonode(rev)
1702 newnode = collapsedas or tonode(newrev)
1702 newnode = collapsedas or tonode(newrev)
1703 moves[oldnode] = newnode
1703 moves[oldnode] = newnode
1704 if not keepf:
1704 if not keepf:
1705 if rev in skipped:
1705 if rev in skipped:
1706 succs = ()
1706 succs = ()
1707 else:
1707 else:
1708 succs = (newnode,)
1708 succs = (newnode,)
1709 replacements[oldnode] = succs
1709 replacements[oldnode] = succs
1710 scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
1710 scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
1711 if fm:
1711 if fm:
1712 hf = fm.hexfunc
1712 hf = fm.hexfunc
1713 fl = fm.formatlist
1713 fl = fm.formatlist
1714 fd = fm.formatdict
1714 fd = fm.formatdict
1715 nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
1715 nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
1716 for oldn, newn in replacements.iteritems()},
1716 for oldn, newn in replacements.iteritems()},
1717 key="oldnode", value="newnodes")
1717 key="oldnode", value="newnodes")
1718 fm.data(nodechanges=nodechanges)
1718 fm.data(nodechanges=nodechanges)
1719
1719
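# A sketch of the bookkeeping clearrebased() above performs, with plain ints
# standing in for nodes (hypothetical data, not a real repository). Every
# rebased revision is recorded in 'moves'; unless --keep is in effect it also
# lands in 'replacements', where skipped revisions get an empty successor
# tuple so that cleanupnodes() prunes them outright.
state = {10: 20, 11: 21, 12: 12}       # 12 was already in the right place
skipped = {11}
moves, replacements = {}, {}
for rev, newrev in sorted(state.items()):
    if newrev >= 0 and newrev != rev:
        moves[rev] = newrev
        replacements[rev] = () if rev in skipped else (newrev,)
assert moves == {10: 20, 11: 21}
assert replacements == {10: (20,), 11: ()}
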
1720 def pullrebase(orig, ui, repo, *args, **opts):
1720 def pullrebase(orig, ui, repo, *args, **opts):
1721 'Call rebase after pull if the latter has been invoked with --rebase'
1721 'Call rebase after pull if the latter has been invoked with --rebase'
1722 ret = None
1722 ret = None
1723 if opts.get(r'rebase'):
1723 if opts.get(r'rebase'):
1724 if ui.configbool('commands', 'rebase.requiredest'):
1724 if ui.configbool('commands', 'rebase.requiredest'):
1725 msg = _('rebase destination required by configuration')
1725 msg = _('rebase destination required by configuration')
1726 hint = _('use hg pull followed by hg rebase -d DEST')
1726 hint = _('use hg pull followed by hg rebase -d DEST')
1727 raise error.Abort(msg, hint=hint)
1727 raise error.Abort(msg, hint=hint)
1728
1728
1729 with repo.wlock(), repo.lock():
1729 with repo.wlock(), repo.lock():
1730 if opts.get(r'update'):
1730 if opts.get(r'update'):
1731 del opts[r'update']
1731 del opts[r'update']
1732 ui.debug('--update and --rebase are not compatible, ignoring '
1732 ui.debug('--update and --rebase are not compatible, ignoring '
1733 'the update flag\n')
1733 'the update flag\n')
1734
1734
1735 cmdutil.checkunfinished(repo)
1735 cmdutil.checkunfinished(repo)
1736 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1736 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1737 'please commit or shelve your changes first'))
1737 'please commit or shelve your changes first'))
1738
1738
1739 revsprepull = len(repo)
1739 revsprepull = len(repo)
1740 origpostincoming = commands.postincoming
1740 origpostincoming = commands.postincoming
1741 def _dummy(*args, **kwargs):
1741 def _dummy(*args, **kwargs):
1742 pass
1742 pass
1743 commands.postincoming = _dummy
1743 commands.postincoming = _dummy
1744 try:
1744 try:
1745 ret = orig(ui, repo, *args, **opts)
1745 ret = orig(ui, repo, *args, **opts)
1746 finally:
1746 finally:
1747 commands.postincoming = origpostincoming
1747 commands.postincoming = origpostincoming
1748 revspostpull = len(repo)
1748 revspostpull = len(repo)
1749 if revspostpull > revsprepull:
1749 if revspostpull > revsprepull:
1750 # the --rev option from pull conflicts with rebase's own --rev,
1750 # the --rev option from pull conflicts with rebase's own --rev,
1751 # so drop it
1751 # so drop it
1752 if r'rev' in opts:
1752 if r'rev' in opts:
1753 del opts[r'rev']
1753 del opts[r'rev']
1754 # positional argument from pull conflicts with rebase's own
1754 # positional argument from pull conflicts with rebase's own
1755 # --source.
1755 # --source.
1756 if r'source' in opts:
1756 if r'source' in opts:
1757 del opts[r'source']
1757 del opts[r'source']
1758 # revsprepull is the len of the repo, not revnum of tip.
1758 # revsprepull is the len of the repo, not revnum of tip.
1759 destspace = list(repo.changelog.revs(start=revsprepull))
1759 destspace = list(repo.changelog.revs(start=revsprepull))
1760 opts[r'_destspace'] = destspace
1760 opts[r'_destspace'] = destspace
1761 try:
1761 try:
1762 rebase(ui, repo, **opts)
1762 rebase(ui, repo, **opts)
1763 except error.NoMergeDestAbort:
1763 except error.NoMergeDestAbort:
1764 # we can maybe update instead
1764 # we can maybe update instead
1765 rev, _a, _b = destutil.destupdate(repo)
1765 rev, _a, _b = destutil.destupdate(repo)
1766 if rev == repo['.'].rev():
1766 if rev == repo['.'].rev():
1767 ui.status(_('nothing to rebase\n'))
1767 ui.status(_('nothing to rebase\n'))
1768 else:
1768 else:
1769 ui.status(_('nothing to rebase - updating instead\n'))
1769 ui.status(_('nothing to rebase - updating instead\n'))
1770 # not passing argument to get the bare update behavior
1770 # not passing argument to get the bare update behavior
1771 # with warning and trumpets
1771 # with warning and trumpets
1772 commands.update(ui, repo)
1772 commands.update(ui, repo)
1773 else:
1773 else:
1774 if opts.get(r'tool'):
1774 if opts.get(r'tool'):
1775 raise error.Abort(_('--tool can only be used with --rebase'))
1775 raise error.Abort(_('--tool can only be used with --rebase'))
1776 ret = orig(ui, repo, *args, **opts)
1776 ret = orig(ui, repo, *args, **opts)
1777
1777
1778 return ret
1778 return ret
1779
1779
1780 def _filterobsoleterevs(repo, revs):
1780 def _filterobsoleterevs(repo, revs):
1781 """returns a set of the obsolete revisions in revs"""
1781 """returns a set of the obsolete revisions in revs"""
1782 return set(r for r in revs if repo[r].obsolete())
1782 return set(r for r in revs if repo[r].obsolete())
1783
1783
1784 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
1784 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
1785 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
1785 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
1786
1786
1787 `obsoletenotrebased` is a mapping of obsolete => successor for all
1787 `obsoletenotrebased` is a mapping of obsolete => successor for all
1788 obsolete nodes to be rebased given in `rebaseobsrevs`.
1788 obsolete nodes to be rebased given in `rebaseobsrevs`.
1789
1789
1790 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
1790 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
1791 without a successor in destination.
1791 without a successor in destination.
1792
1792
1793 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
1793 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
1794 extinct successors.
1794 extinct successors.
1795 """
1795 """
1796 obsoletenotrebased = {}
1796 obsoletenotrebased = {}
1797 obsoletewithoutsuccessorindestination = set([])
1797 obsoletewithoutsuccessorindestination = set([])
1798 obsoleteextinctsuccessors = set([])
1798 obsoleteextinctsuccessors = set([])
1799
1799
1800 assert repo.filtername is None
1800 assert repo.filtername is None
1801 cl = repo.changelog
1801 cl = repo.changelog
1802 nodemap = cl.nodemap
1802 nodemap = cl.nodemap
1803 extinctnodes = set(cl.node(r) for r in repo.revs('extinct()'))
1803 extinctnodes = set(cl.node(r) for r in repo.revs('extinct()'))
1804 for srcrev in rebaseobsrevs:
1804 for srcrev in rebaseobsrevs:
1805 srcnode = cl.node(srcrev)
1805 srcnode = cl.node(srcrev)
1806 destnode = cl.node(destmap[srcrev])
1806 destnode = cl.node(destmap[srcrev])
1807 # XXX: more advanced APIs are required to handle split correctly
1807 # XXX: more advanced APIs are required to handle split correctly
1808 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
1808 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
1809 # obsutil.allsuccessors includes node itself
1809 # obsutil.allsuccessors includes node itself
1810 successors.remove(srcnode)
1810 successors.remove(srcnode)
1811 if successors.issubset(extinctnodes):
1811 if successors.issubset(extinctnodes):
1812 # all successors are extinct
1812 # all successors are extinct
1813 obsoleteextinctsuccessors.add(srcrev)
1813 obsoleteextinctsuccessors.add(srcrev)
1814 if not successors:
1814 if not successors:
1815 # no successor
1815 # no successor
1816 obsoletenotrebased[srcrev] = None
1816 obsoletenotrebased[srcrev] = None
1817 else:
1817 else:
1818 for succnode in successors:
1818 for succnode in successors:
1819 if succnode not in nodemap:
1819 if succnode not in nodemap:
1820 continue
1820 continue
1821 if cl.isancestor(succnode, destnode):
1821 if cl.isancestor(succnode, destnode):
1822 obsoletenotrebased[srcrev] = nodemap[succnode]
1822 obsoletenotrebased[srcrev] = nodemap[succnode]
1823 break
1823 break
1824 else:
1824 else:
1825 # If 'srcrev' has a successor in rebase set but none in
1825 # If 'srcrev' has a successor in rebase set but none in
1826 # destination (which would be caught above), we shall skip it
1826 # destination (which would be caught above), we shall skip it
1827 # and its descendants to avoid divergence.
1827 # and its descendants to avoid divergence.
1828 if any(nodemap[s] in destmap for s in successors):
1828 if any(nodemap[s] in destmap for s in successors):
1829 obsoletewithoutsuccessorindestination.add(srcrev)
1829 obsoletewithoutsuccessorindestination.add(srcrev)
1830
1830
1831 return (
1831 return (
1832 obsoletenotrebased,
1832 obsoletenotrebased,
1833 obsoletewithoutsuccessorindestination,
1833 obsoletewithoutsuccessorindestination,
1834 obsoleteextinctsuccessors,
1834 obsoleteextinctsuccessors,
1835 )
1835 )
1836
1836
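# A reduced, stand-alone sketch of the classification above: an obsolete
# revision whose successor already sits behind the destination is simply
# mapped to that successor, one with no successors at all is mapped to None,
# and one whose successors live elsewhere in the rebase set is flagged so it
# (and its descendants) can be skipped to avoid divergence. All names here
# are hypothetical.
def classify(rebaseobsrevs, successors, ancestorsofdest, destmap):
    notrebased, skiptoavoiddivergence = {}, set()
    for rev in rebaseobsrevs:
        succs = successors.get(rev, ())
        indest = [s for s in succs if s in ancestorsofdest]
        if not succs:
            notrebased[rev] = None
        elif indest:
            notrebased[rev] = indest[0]
        elif any(s in destmap for s in succs):
            skiptoavoiddivergence.add(rev)
    return notrebased, skiptoavoiddivergence

notrebased, skipped = classify(
    rebaseobsrevs={5, 6, 7},
    successors={5: [9], 6: [], 7: [5]},    # 7's successor is inside the set
    ancestorsofdest={8, 9},
    destmap={5: 10, 6: 10, 7: 10})
assert notrebased == {5: 9, 6: None} and skipped == {7}
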
1837 def summaryhook(ui, repo):
1837 def summaryhook(ui, repo):
1838 if not repo.vfs.exists('rebasestate'):
1838 if not repo.vfs.exists('rebasestate'):
1839 return
1839 return
1840 try:
1840 try:
1841 rbsrt = rebaseruntime(repo, ui, {})
1841 rbsrt = rebaseruntime(repo, ui, {})
1842 rbsrt.restorestatus()
1842 rbsrt.restorestatus()
1843 state = rbsrt.state
1843 state = rbsrt.state
1844 except error.RepoLookupError:
1844 except error.RepoLookupError:
1845 # i18n: column positioning for "hg summary"
1845 # i18n: column positioning for "hg summary"
1846 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1846 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1847 ui.write(msg)
1847 ui.write(msg)
1848 return
1848 return
1849 numrebased = len([i for i in state.itervalues() if i >= 0])
1849 numrebased = len([i for i in state.itervalues() if i >= 0])
1850 # i18n: column positioning for "hg summary"
1850 # i18n: column positioning for "hg summary"
1851 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1851 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1852 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1852 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1853 ui.label(_('%d remaining'), 'rebase.remaining') %
1853 ui.label(_('%d remaining'), 'rebase.remaining') %
1854 (len(state) - numrebased)))
1854 (len(state) - numrebased)))
1855
1855
1856 def uisetup(ui):
1856 def uisetup(ui):
1857 # Replace pull with a decorator to provide --rebase option
1857 # Replace pull with a decorator to provide --rebase option
1858 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1858 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1859 entry[1].append(('', 'rebase', None,
1859 entry[1].append(('', 'rebase', None,
1860 _("rebase working directory to branch head")))
1860 _("rebase working directory to branch head")))
1861 entry[1].append(('t', 'tool', '',
1861 entry[1].append(('t', 'tool', '',
1862 _("specify merge tool for rebase")))
1862 _("specify merge tool for rebase")))
1863 cmdutil.summaryhooks.add('rebase', summaryhook)
1863 cmdutil.summaryhooks.add('rebase', summaryhook)
1864 cmdutil.unfinishedstates.append(
1864 cmdutil.unfinishedstates.append(
1865 ['rebasestate', False, False, _('rebase in progress'),
1865 ['rebasestate', False, False, _('rebase in progress'),
1866 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1866 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1867 cmdutil.afterresolvedstates.append(
1867 cmdutil.afterresolvedstates.append(
1868 ['rebasestate', _('hg rebase --continue')])
1868 ['rebasestate', _('hg rebase --continue')])
@@ -1,1058 +1,1058
1 # shelve.py - save/restore working directory state
1 # shelve.py - save/restore working directory state
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """save and restore changes to the working directory
8 """save and restore changes to the working directory
9
9
10 The "hg shelve" command saves changes made to the working directory
10 The "hg shelve" command saves changes made to the working directory
11 and reverts those changes, resetting the working directory to a clean
11 and reverts those changes, resetting the working directory to a clean
12 state.
12 state.
13
13
14 Later on, the "hg unshelve" command restores the changes saved by "hg
14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 shelve". Changes can be restored even after updating to a different
15 shelve". Changes can be restored even after updating to a different
16 parent, in which case Mercurial's merge machinery will resolve any
16 parent, in which case Mercurial's merge machinery will resolve any
17 conflicts if necessary.
17 conflicts if necessary.
18
18
19 You can have more than one shelved change outstanding at a time; each
19 You can have more than one shelved change outstanding at a time; each
20 shelved change has a distinct name. For details, see the help for "hg
20 shelved change has a distinct name. For details, see the help for "hg
21 shelve".
21 shelve".
22 """
22 """
23 from __future__ import absolute_import
23 from __future__ import absolute_import
24
24
25 import collections
25 import collections
26 import errno
26 import errno
27 import itertools
27 import itertools
28
28
29 from mercurial.i18n import _
29 from mercurial.i18n import _
30 from mercurial import (
30 from mercurial import (
31 bookmarks,
31 bookmarks,
32 bundle2,
32 bundle2,
33 bundlerepo,
33 bundlerepo,
34 changegroup,
34 changegroup,
35 cmdutil,
35 cmdutil,
36 discovery,
36 discovery,
37 error,
37 error,
38 exchange,
38 exchange,
39 hg,
39 hg,
40 lock as lockmod,
40 lock as lockmod,
41 mdiff,
41 mdiff,
42 merge,
42 merge,
43 node as nodemod,
43 node as nodemod,
44 patch,
44 patch,
45 phases,
45 phases,
46 pycompat,
46 pycompat,
47 registrar,
47 registrar,
48 repair,
48 repair,
49 scmutil,
49 scmutil,
50 templatefilters,
50 templatefilters,
51 util,
51 util,
52 vfs as vfsmod,
52 vfs as vfsmod,
53 )
53 )
54
54
55 from . import (
55 from . import (
56 rebase,
56 rebase,
57 )
57 )
58
58
59 cmdtable = {}
59 cmdtable = {}
60 command = registrar.command(cmdtable)
60 command = registrar.command(cmdtable)
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
63 # be specifying the version(s) of Mercurial they are tested with, or
63 # be specifying the version(s) of Mercurial they are tested with, or
64 # leave the attribute unspecified.
64 # leave the attribute unspecified.
65 testedwith = 'ships-with-hg-core'
65 testedwith = 'ships-with-hg-core'
66
66
67 configtable = {}
67 configtable = {}
68 configitem = registrar.configitem(configtable)
68 configitem = registrar.configitem(configtable)
69
69
70 configitem('shelve', 'maxbackups',
70 configitem('shelve', 'maxbackups',
71 default=10,
71 default=10,
72 )
72 )
73
73
74 backupdir = 'shelve-backup'
74 backupdir = 'shelve-backup'
75 shelvedir = 'shelved'
75 shelvedir = 'shelved'
76 shelvefileextensions = ['hg', 'patch', 'oshelve']
76 shelvefileextensions = ['hg', 'patch', 'oshelve']
77 # universal extension is present in all types of shelves
77 # universal extension is present in all types of shelves
78 patchextension = 'patch'
78 patchextension = 'patch'
79
79
80 # we never need the user, so we use a
80 # we never need the user, so we use a
81 # generic user for all shelve operations
81 # generic user for all shelve operations
82 shelveuser = 'shelve@localhost'
82 shelveuser = 'shelve@localhost'
83
83
84 class shelvedfile(object):
84 class shelvedfile(object):
85 """Helper for the file storing a single shelve
85 """Helper for the file storing a single shelve
86
86
87 Handles common functions on shelve files (.hg/.patch) using
87 Handles common functions on shelve files (.hg/.patch) using
88 the vfs layer"""
88 the vfs layer"""
89 def __init__(self, repo, name, filetype=None):
89 def __init__(self, repo, name, filetype=None):
90 self.repo = repo
90 self.repo = repo
91 self.name = name
91 self.name = name
92 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
92 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
93 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
93 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
94 self.ui = self.repo.ui
94 self.ui = self.repo.ui
95 if filetype:
95 if filetype:
96 self.fname = name + '.' + filetype
96 self.fname = name + '.' + filetype
97 else:
97 else:
98 self.fname = name
98 self.fname = name
99
99
100 def exists(self):
100 def exists(self):
101 return self.vfs.exists(self.fname)
101 return self.vfs.exists(self.fname)
102
102
103 def filename(self):
103 def filename(self):
104 return self.vfs.join(self.fname)
104 return self.vfs.join(self.fname)
105
105
106 def backupfilename(self):
106 def backupfilename(self):
107 def gennames(base):
107 def gennames(base):
108 yield base
108 yield base
109 base, ext = base.rsplit('.', 1)
109 base, ext = base.rsplit('.', 1)
110 for i in itertools.count(1):
110 for i in itertools.count(1):
111 yield '%s-%d.%s' % (base, i, ext)
111 yield '%s-%d.%s' % (base, i, ext)
112
112
113 name = self.backupvfs.join(self.fname)
113 name = self.backupvfs.join(self.fname)
114 for n in gennames(name):
114 for n in gennames(name):
115 if not self.backupvfs.exists(n):
115 if not self.backupvfs.exists(n):
116 return n
116 return n
117
117
118 def movetobackup(self):
118 def movetobackup(self):
119 if not self.backupvfs.isdir():
119 if not self.backupvfs.isdir():
120 self.backupvfs.makedir()
120 self.backupvfs.makedir()
121 util.rename(self.filename(), self.backupfilename())
121 util.rename(self.filename(), self.backupfilename())
122
122
123 def stat(self):
123 def stat(self):
124 return self.vfs.stat(self.fname)
124 return self.vfs.stat(self.fname)
125
125
126 def opener(self, mode='rb'):
126 def opener(self, mode='rb'):
127 try:
127 try:
128 return self.vfs(self.fname, mode)
128 return self.vfs(self.fname, mode)
129 except IOError as err:
129 except IOError as err:
130 if err.errno != errno.ENOENT:
130 if err.errno != errno.ENOENT:
131 raise
131 raise
132 raise error.Abort(_("shelved change '%s' not found") % self.name)
132 raise error.Abort(_("shelved change '%s' not found") % self.name)
133
133
134 def applybundle(self):
134 def applybundle(self):
135 fp = self.opener()
135 fp = self.opener()
136 try:
136 try:
137 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
137 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
138 bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
138 bundle2.applybundle(self.repo, gen, self.repo.currenttransaction(),
139 source='unshelve',
139 source='unshelve',
140 url='bundle:' + self.vfs.join(self.fname),
140 url='bundle:' + self.vfs.join(self.fname),
141 targetphase=phases.secret)
141 targetphase=phases.secret)
142 finally:
142 finally:
143 fp.close()
143 fp.close()
144
144
145 def bundlerepo(self):
145 def bundlerepo(self):
146 return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
146 return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
147 self.vfs.join(self.fname))
147 self.vfs.join(self.fname))
148 def writebundle(self, bases, node):
148 def writebundle(self, bases, node):
149 cgversion = changegroup.safeversion(self.repo)
149 cgversion = changegroup.safeversion(self.repo)
150 if cgversion == '01':
150 if cgversion == '01':
151 btype = 'HG10BZ'
151 btype = 'HG10BZ'
152 compression = None
152 compression = None
153 else:
153 else:
154 btype = 'HG20'
154 btype = 'HG20'
155 compression = 'BZ'
155 compression = 'BZ'
156
156
157 outgoing = discovery.outgoing(self.repo, missingroots=bases,
157 outgoing = discovery.outgoing(self.repo, missingroots=bases,
158 missingheads=[node])
158 missingheads=[node])
159 cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
159 cg = changegroup.makechangegroup(self.repo, outgoing, cgversion,
160 'shelve')
160 'shelve')
161
161
162 bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
162 bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
163 compression=compression)
163 compression=compression)
164
164
165 def writeobsshelveinfo(self, info):
165 def writeobsshelveinfo(self, info):
166 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
166 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
167
167
168 def readobsshelveinfo(self):
168 def readobsshelveinfo(self):
169 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
169 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
170
170
171 class shelvedstate(object):
171 class shelvedstate(object):
172 """Handle persistence during unshelving operations.
172 """Handle persistence during unshelving operations.
173
173
174 Handles saving and restoring a shelved state. Ensures that different
174 Handles saving and restoring a shelved state. Ensures that different
175 versions of a shelved state are possible and handles them appropriately.
175 versions of a shelved state are possible and handles them appropriately.
176 """
176 """
177 _version = 2
177 _version = 2
178 _filename = 'shelvedstate'
178 _filename = 'shelvedstate'
179 _keep = 'keep'
179 _keep = 'keep'
180 _nokeep = 'nokeep'
180 _nokeep = 'nokeep'
181 # colon is essential to differentiate from a real bookmark name
181 # colon is essential to differentiate from a real bookmark name
182 _noactivebook = ':no-active-bookmark'
182 _noactivebook = ':no-active-bookmark'
183
183
184 @classmethod
184 @classmethod
185 def _verifyandtransform(cls, d):
185 def _verifyandtransform(cls, d):
186 """Some basic shelvestate syntactic verification and transformation"""
186 """Some basic shelvestate syntactic verification and transformation"""
187 try:
187 try:
188 d['originalwctx'] = nodemod.bin(d['originalwctx'])
188 d['originalwctx'] = nodemod.bin(d['originalwctx'])
189 d['pendingctx'] = nodemod.bin(d['pendingctx'])
189 d['pendingctx'] = nodemod.bin(d['pendingctx'])
190 d['parents'] = [nodemod.bin(h)
190 d['parents'] = [nodemod.bin(h)
191 for h in d['parents'].split(' ')]
191 for h in d['parents'].split(' ')]
192 d['nodestoremove'] = [nodemod.bin(h)
192 d['nodestoremove'] = [nodemod.bin(h)
193 for h in d['nodestoremove'].split(' ')]
193 for h in d['nodestoremove'].split(' ')]
194 except (ValueError, TypeError, KeyError) as err:
194 except (ValueError, TypeError, KeyError) as err:
195 raise error.CorruptedState(str(err))
195 raise error.CorruptedState(str(err))
196
196
197 @classmethod
197 @classmethod
198 def _getversion(cls, repo):
198 def _getversion(cls, repo):
199 """Read version information from shelvestate file"""
199 """Read version information from shelvestate file"""
200 fp = repo.vfs(cls._filename)
200 fp = repo.vfs(cls._filename)
201 try:
201 try:
202 version = int(fp.readline().strip())
202 version = int(fp.readline().strip())
203 except ValueError as err:
203 except ValueError as err:
204 raise error.CorruptedState(str(err))
204 raise error.CorruptedState(str(err))
205 finally:
205 finally:
206 fp.close()
206 fp.close()
207 return version
207 return version
208
208
209 @classmethod
209 @classmethod
210 def _readold(cls, repo):
210 def _readold(cls, repo):
211 """Read the old position-based version of a shelvestate file"""
211 """Read the old position-based version of a shelvestate file"""
212 # Order is important, because old shelvestate file uses it
212 # Order is important, because old shelvestate file uses it
213 # to determine values of fields (e.g. name is on the second line,
213 # to determine values of fields (e.g. name is on the second line,
214 # originalwctx is on the third and so forth). Please do not change.
214 # originalwctx is on the third and so forth). Please do not change.
215 keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
215 keys = ['version', 'name', 'originalwctx', 'pendingctx', 'parents',
216 'nodestoremove', 'branchtorestore', 'keep', 'activebook']
216 'nodestoremove', 'branchtorestore', 'keep', 'activebook']
217 # this is executed only rarely, so it is not a big deal
217 # this is executed only rarely, so it is not a big deal
218 # that we open this file twice
218 # that we open this file twice
219 fp = repo.vfs(cls._filename)
219 fp = repo.vfs(cls._filename)
220 d = {}
220 d = {}
221 try:
221 try:
222 for key in keys:
222 for key in keys:
223 d[key] = fp.readline().strip()
223 d[key] = fp.readline().strip()
224 finally:
224 finally:
225 fp.close()
225 fp.close()
226 return d
226 return d
227
227
228 @classmethod
228 @classmethod
229 def load(cls, repo):
229 def load(cls, repo):
230 version = cls._getversion(repo)
230 version = cls._getversion(repo)
231 if version < cls._version:
231 if version < cls._version:
232 d = cls._readold(repo)
232 d = cls._readold(repo)
233 elif version == cls._version:
233 elif version == cls._version:
234 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
234 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
235 .read(firstlinenonkeyval=True)
235 .read(firstlinenonkeyval=True)
236 else:
236 else:
237 raise error.Abort(_('this version of shelve is incompatible '
237 raise error.Abort(_('this version of shelve is incompatible '
238 'with the version used in this repo'))
238 'with the version used in this repo'))
239
239
240 cls._verifyandtransform(d)
240 cls._verifyandtransform(d)
241 try:
241 try:
242 obj = cls()
242 obj = cls()
243 obj.name = d['name']
243 obj.name = d['name']
244 obj.wctx = repo[d['originalwctx']]
244 obj.wctx = repo[d['originalwctx']]
245 obj.pendingctx = repo[d['pendingctx']]
245 obj.pendingctx = repo[d['pendingctx']]
246 obj.parents = d['parents']
246 obj.parents = d['parents']
247 obj.nodestoremove = d['nodestoremove']
247 obj.nodestoremove = d['nodestoremove']
248 obj.branchtorestore = d.get('branchtorestore', '')
248 obj.branchtorestore = d.get('branchtorestore', '')
249 obj.keep = d.get('keep') == cls._keep
249 obj.keep = d.get('keep') == cls._keep
250 obj.activebookmark = ''
250 obj.activebookmark = ''
251 if d.get('activebook', '') != cls._noactivebook:
251 if d.get('activebook', '') != cls._noactivebook:
252 obj.activebookmark = d.get('activebook', '')
252 obj.activebookmark = d.get('activebook', '')
253 except (error.RepoLookupError, KeyError) as err:
253 except (error.RepoLookupError, KeyError) as err:
254 raise error.CorruptedState(str(err))
254 raise error.CorruptedState(str(err))
255
255
256 return obj
256 return obj
257
257
258 @classmethod
258 @classmethod
259 def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
259 def save(cls, repo, name, originalwctx, pendingctx, nodestoremove,
260 branchtorestore, keep=False, activebook=''):
260 branchtorestore, keep=False, activebook=''):
261 info = {
261 info = {
262 "name": name,
262 "name": name,
263 "originalwctx": nodemod.hex(originalwctx.node()),
263 "originalwctx": nodemod.hex(originalwctx.node()),
264 "pendingctx": nodemod.hex(pendingctx.node()),
264 "pendingctx": nodemod.hex(pendingctx.node()),
265 "parents": ' '.join([nodemod.hex(p)
265 "parents": ' '.join([nodemod.hex(p)
266 for p in repo.dirstate.parents()]),
266 for p in repo.dirstate.parents()]),
267 "nodestoremove": ' '.join([nodemod.hex(n)
267 "nodestoremove": ' '.join([nodemod.hex(n)
268 for n in nodestoremove]),
268 for n in nodestoremove]),
269 "branchtorestore": branchtorestore,
269 "branchtorestore": branchtorestore,
270 "keep": cls._keep if keep else cls._nokeep,
270 "keep": cls._keep if keep else cls._nokeep,
271 "activebook": activebook or cls._noactivebook
271 "activebook": activebook or cls._noactivebook
272 }
272 }
273 scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
273 scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
274 .write(info, firstline=str(cls._version))
274 .write(info, firstline=("%d" % cls._version))
275
275
276 @classmethod
276 @classmethod
277 def clear(cls, repo):
277 def clear(cls, repo):
278 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
278 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
279
279
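# Why the hunk in save() above replaces str() with "%d": under Mercurial's
# Python 3 source transformer, plain string literals are turned into bytes
# literals, so "%d" % n yields bytes on both Python 2 and 3, whereas str(n)
# would yield a unicode str on Python 3. A stand-alone illustration, written
# with an explicit b'' literal:
version = 2
assert b"%d" % version == b"2"    # bytes in, bytes out (Python 3.5+ / Python 2)
# str(version) would instead give the text string '2' on Python 3, which the
# byte-oriented vfs file code cannot accept.
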
280 def cleanupoldbackups(repo):
280 def cleanupoldbackups(repo):
281 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
281 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
282 maxbackups = repo.ui.configint('shelve', 'maxbackups')
282 maxbackups = repo.ui.configint('shelve', 'maxbackups')
283 hgfiles = [f for f in vfs.listdir()
283 hgfiles = [f for f in vfs.listdir()
284 if f.endswith('.' + patchextension)]
284 if f.endswith('.' + patchextension)]
285 hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
285 hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
286 if 0 < maxbackups and maxbackups < len(hgfiles):
286 if 0 < maxbackups and maxbackups < len(hgfiles):
287 bordermtime = hgfiles[-maxbackups][0]
287 bordermtime = hgfiles[-maxbackups][0]
288 else:
288 else:
289 bordermtime = None
289 bordermtime = None
290 for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
290 for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
291 if mtime == bordermtime:
291 if mtime == bordermtime:
292 # keep it, because timestamps can't decide the exact order of backups
292 # keep it, because timestamps can't decide the exact order of backups
293 continue
293 continue
294 base = f[:-(1 + len(patchextension))]
294 base = f[:-(1 + len(patchextension))]
295 for ext in shelvefileextensions:
295 for ext in shelvefileextensions:
296 vfs.tryunlink(base + '.' + ext)
296 vfs.tryunlink(base + '.' + ext)
297
297
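# A stand-alone sketch of the pruning rule in cleanupoldbackups() above: keep
# the newest 'maxbackups' files by mtime, but never delete a file that shares
# the border mtime, because equal timestamps cannot be ordered reliably.
# 'prune_candidates' and the sample data are hypothetical.
def prune_candidates(files_with_mtime, maxbackups):
    files = sorted(files_with_mtime)             # (mtime, name), oldest first
    if 0 < maxbackups < len(files):
        bordermtime = files[-maxbackups][0]
    else:
        bordermtime = None
    return [name for mtime, name in files[:len(files) - maxbackups]
            if mtime != bordermtime]

# three backups at mtimes 1, 2, 2 with maxbackups=1: the two newest share the
# border mtime, so only the oldest backup is deleted
assert prune_candidates([(2, 'b'), (1, 'a'), (2, 'c')], 1) == ['a']
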
298 def _backupactivebookmark(repo):
298 def _backupactivebookmark(repo):
299 activebookmark = repo._activebookmark
299 activebookmark = repo._activebookmark
300 if activebookmark:
300 if activebookmark:
301 bookmarks.deactivate(repo)
301 bookmarks.deactivate(repo)
302 return activebookmark
302 return activebookmark
303
303
304 def _restoreactivebookmark(repo, mark):
304 def _restoreactivebookmark(repo, mark):
305 if mark:
305 if mark:
306 bookmarks.activate(repo, mark)
306 bookmarks.activate(repo, mark)
307
307
308 def _aborttransaction(repo):
308 def _aborttransaction(repo):
309 '''Abort current transaction for shelve/unshelve, but keep dirstate
309 '''Abort current transaction for shelve/unshelve, but keep dirstate
310 '''
310 '''
311 tr = repo.currenttransaction()
311 tr = repo.currenttransaction()
312 backupname = 'dirstate.shelve'
312 backupname = 'dirstate.shelve'
313 repo.dirstate.savebackup(tr, backupname)
313 repo.dirstate.savebackup(tr, backupname)
314 tr.abort()
314 tr.abort()
315 repo.dirstate.restorebackup(None, backupname)
315 repo.dirstate.restorebackup(None, backupname)
316
316
317 def createcmd(ui, repo, pats, opts):
317 def createcmd(ui, repo, pats, opts):
318 """subcommand that creates a new shelve"""
318 """subcommand that creates a new shelve"""
319 with repo.wlock():
319 with repo.wlock():
320 cmdutil.checkunfinished(repo)
320 cmdutil.checkunfinished(repo)
321 return _docreatecmd(ui, repo, pats, opts)
321 return _docreatecmd(ui, repo, pats, opts)
322
322
323 def getshelvename(repo, parent, opts):
323 def getshelvename(repo, parent, opts):
324 """Decide on the name this shelve is going to have"""
324 """Decide on the name this shelve is going to have"""
325 def gennames():
325 def gennames():
326 yield label
326 yield label
327 for i in itertools.count(1):
327 for i in itertools.count(1):
328 yield '%s-%02d' % (label, i)
328 yield '%s-%02d' % (label, i)
329 name = opts.get('name')
329 name = opts.get('name')
330 label = repo._activebookmark or parent.branch() or 'default'
330 label = repo._activebookmark or parent.branch() or 'default'
331 # slashes aren't allowed in filenames, so replace them with underscores
331 # slashes aren't allowed in filenames, so replace them with underscores
332 label = label.replace('/', '_')
332 label = label.replace('/', '_')
333 label = label.replace('\\', '_')
333 label = label.replace('\\', '_')
334 # filenames must not start with '.', as the file should not be hidden
334 # filenames must not start with '.', as the file should not be hidden
335 if label.startswith('.'):
335 if label.startswith('.'):
336 label = label.replace('.', '_', 1)
336 label = label.replace('.', '_', 1)
337
337
338 if name:
338 if name:
339 if shelvedfile(repo, name, patchextension).exists():
339 if shelvedfile(repo, name, patchextension).exists():
340 e = _("a shelved change named '%s' already exists") % name
340 e = _("a shelved change named '%s' already exists") % name
341 raise error.Abort(e)
341 raise error.Abort(e)
342
342
343 # ensure we are not creating a subdirectory or a hidden file
343 # ensure we are not creating a subdirectory or a hidden file
344 if '/' in name or '\\' in name:
344 if '/' in name or '\\' in name:
345 raise error.Abort(_('shelved change names can not contain slashes'))
345 raise error.Abort(_('shelved change names can not contain slashes'))
346 if name.startswith('.'):
346 if name.startswith('.'):
347 raise error.Abort(_("shelved change names can not start with '.'"))
347 raise error.Abort(_("shelved change names can not start with '.'"))
348
348
349 else:
349 else:
350 for n in gennames():
350 for n in gennames():
351 if not shelvedfile(repo, n, patchextension).exists():
351 if not shelvedfile(repo, n, patchextension).exists():
352 name = n
352 name = n
353 break
353 break
354
354
355 return name
355 return name
356
356
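# A tiny sketch of the default-name sequence gennames() in getshelvename()
# above walks through when no --name is given ('candidate_names' is a
# hypothetical stand-alone helper).
import itertools

def candidate_names(label):
    yield label
    for i in itertools.count(1):
        yield '%s-%02d' % (label, i)

gen = candidate_names('feature_x')
assert [next(gen) for _ in range(3)] == ['feature_x', 'feature_x-01',
                                         'feature_x-02']
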
357 def mutableancestors(ctx):
357 def mutableancestors(ctx):
358 """return all mutable ancestors for ctx (included)
358 """return all mutable ancestors for ctx (included)
359
359
360 Much faster than the revset ancestors(ctx) & draft()"""
360 Much faster than the revset ancestors(ctx) & draft()"""
361 seen = {nodemod.nullrev}
361 seen = {nodemod.nullrev}
362 visit = collections.deque()
362 visit = collections.deque()
363 visit.append(ctx)
363 visit.append(ctx)
364 while visit:
364 while visit:
365 ctx = visit.popleft()
365 ctx = visit.popleft()
366 yield ctx.node()
366 yield ctx.node()
367 for parent in ctx.parents():
367 for parent in ctx.parents():
368 rev = parent.rev()
368 rev = parent.rev()
369 if rev not in seen:
369 if rev not in seen:
370 seen.add(rev)
370 seen.add(rev)
371 if parent.mutable():
371 if parent.mutable():
372 visit.append(parent)
372 visit.append(parent)
373
373
374 def getcommitfunc(extra, interactive, editor=False):
374 def getcommitfunc(extra, interactive, editor=False):
375 def commitfunc(ui, repo, message, match, opts):
375 def commitfunc(ui, repo, message, match, opts):
376 hasmq = util.safehasattr(repo, 'mq')
376 hasmq = util.safehasattr(repo, 'mq')
377 if hasmq:
377 if hasmq:
378 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
378 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
379 overrides = {('phases', 'new-commit'): phases.secret}
379 overrides = {('phases', 'new-commit'): phases.secret}
380 try:
380 try:
381 editor_ = False
381 editor_ = False
382 if editor:
382 if editor:
383 editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
383 editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
384 **pycompat.strkwargs(opts))
384 **pycompat.strkwargs(opts))
385 with repo.ui.configoverride(overrides):
385 with repo.ui.configoverride(overrides):
386 return repo.commit(message, shelveuser, opts.get('date'),
386 return repo.commit(message, shelveuser, opts.get('date'),
387 match, editor=editor_, extra=extra)
387 match, editor=editor_, extra=extra)
388 finally:
388 finally:
389 if hasmq:
389 if hasmq:
390 repo.mq.checkapplied = saved
390 repo.mq.checkapplied = saved
391
391
392 def interactivecommitfunc(ui, repo, *pats, **opts):
392 def interactivecommitfunc(ui, repo, *pats, **opts):
393 opts = pycompat.byteskwargs(opts)
393 opts = pycompat.byteskwargs(opts)
394 match = scmutil.match(repo['.'], pats, {})
394 match = scmutil.match(repo['.'], pats, {})
395 message = opts['message']
395 message = opts['message']
396 return commitfunc(ui, repo, message, match, opts)
396 return commitfunc(ui, repo, message, match, opts)
397
397
398 return interactivecommitfunc if interactive else commitfunc
398 return interactivecommitfunc if interactive else commitfunc
399
399
400 def _nothingtoshelvemessaging(ui, repo, pats, opts):
400 def _nothingtoshelvemessaging(ui, repo, pats, opts):
401 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
401 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
402 if stat.deleted:
402 if stat.deleted:
403 ui.status(_("nothing changed (%d missing files, see "
403 ui.status(_("nothing changed (%d missing files, see "
404 "'hg status')\n") % len(stat.deleted))
404 "'hg status')\n") % len(stat.deleted))
405 else:
405 else:
406 ui.status(_("nothing changed\n"))
406 ui.status(_("nothing changed\n"))
407
407
408 def _shelvecreatedcommit(repo, node, name):
408 def _shelvecreatedcommit(repo, node, name):
409 bases = list(mutableancestors(repo[node]))
409 bases = list(mutableancestors(repo[node]))
410 shelvedfile(repo, name, 'hg').writebundle(bases, node)
410 shelvedfile(repo, name, 'hg').writebundle(bases, node)
411 cmdutil.export(repo, [node],
411 cmdutil.export(repo, [node],
412 fp=shelvedfile(repo, name, patchextension).opener('wb'),
412 fp=shelvedfile(repo, name, patchextension).opener('wb'),
413 opts=mdiff.diffopts(git=True))
413 opts=mdiff.diffopts(git=True))
414
414
415 def _includeunknownfiles(repo, pats, opts, extra):
415 def _includeunknownfiles(repo, pats, opts, extra):
416 s = repo.status(match=scmutil.match(repo[None], pats, opts),
416 s = repo.status(match=scmutil.match(repo[None], pats, opts),
417 unknown=True)
417 unknown=True)
418 if s.unknown:
418 if s.unknown:
419 extra['shelve_unknown'] = '\0'.join(s.unknown)
419 extra['shelve_unknown'] = '\0'.join(s.unknown)
420 repo[None].add(s.unknown)
420 repo[None].add(s.unknown)
421
421
422 def _finishshelve(repo):
422 def _finishshelve(repo):
423 _aborttransaction(repo)
423 _aborttransaction(repo)
424
424
425 def _docreatecmd(ui, repo, pats, opts):
425 def _docreatecmd(ui, repo, pats, opts):
426 wctx = repo[None]
426 wctx = repo[None]
427 parents = wctx.parents()
427 parents = wctx.parents()
428 if len(parents) > 1:
428 if len(parents) > 1:
429 raise error.Abort(_('cannot shelve while merging'))
429 raise error.Abort(_('cannot shelve while merging'))
430 parent = parents[0]
430 parent = parents[0]
431 origbranch = wctx.branch()
431 origbranch = wctx.branch()
432
432
433 if parent.node() != nodemod.nullid:
433 if parent.node() != nodemod.nullid:
434 desc = "changes to: %s" % parent.description().split('\n', 1)[0]
434 desc = "changes to: %s" % parent.description().split('\n', 1)[0]
435 else:
435 else:
436 desc = '(changes in empty repository)'
436 desc = '(changes in empty repository)'
437
437
438 if not opts.get('message'):
438 if not opts.get('message'):
439 opts['message'] = desc
439 opts['message'] = desc
440
440
441 lock = tr = activebookmark = None
441 lock = tr = activebookmark = None
442 try:
442 try:
443 lock = repo.lock()
443 lock = repo.lock()
444
444
445 # use an uncommitted transaction to generate the bundle to avoid
445 # use an uncommitted transaction to generate the bundle to avoid
446 # pull races. ensure we don't print the abort message to stderr.
446 # pull races. ensure we don't print the abort message to stderr.
447 tr = repo.transaction('commit', report=lambda x: None)
447 tr = repo.transaction('commit', report=lambda x: None)
448
448
449 interactive = opts.get('interactive', False)
449 interactive = opts.get('interactive', False)
450 includeunknown = (opts.get('unknown', False) and
450 includeunknown = (opts.get('unknown', False) and
451 not opts.get('addremove', False))
451 not opts.get('addremove', False))
452
452
453 name = getshelvename(repo, parent, opts)
453 name = getshelvename(repo, parent, opts)
454 activebookmark = _backupactivebookmark(repo)
454 activebookmark = _backupactivebookmark(repo)
455 extra = {}
455 extra = {}
456 if includeunknown:
456 if includeunknown:
457 _includeunknownfiles(repo, pats, opts, extra)
457 _includeunknownfiles(repo, pats, opts, extra)
458
458
459 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
459 if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
460 # In a non-bare shelve we don't store the newly created branch
460 # In a non-bare shelve we don't store the newly created branch
461 # in the bundled commit
461 # in the bundled commit
462 repo.dirstate.setbranch(repo['.'].branch())
462 repo.dirstate.setbranch(repo['.'].branch())
463
463
464 commitfunc = getcommitfunc(extra, interactive, editor=True)
464 commitfunc = getcommitfunc(extra, interactive, editor=True)
465 if not interactive:
465 if not interactive:
466 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
466 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
467 else:
467 else:
468 node = cmdutil.dorecord(ui, repo, commitfunc, None,
468 node = cmdutil.dorecord(ui, repo, commitfunc, None,
469 False, cmdutil.recordfilter, *pats,
469 False, cmdutil.recordfilter, *pats,
470 **pycompat.strkwargs(opts))
470 **pycompat.strkwargs(opts))
471 if not node:
471 if not node:
472 _nothingtoshelvemessaging(ui, repo, pats, opts)
472 _nothingtoshelvemessaging(ui, repo, pats, opts)
473 return 1
473 return 1
474
474
475 _shelvecreatedcommit(repo, node, name)
475 _shelvecreatedcommit(repo, node, name)
476
476
477 if ui.formatted():
477 if ui.formatted():
478 desc = util.ellipsis(desc, ui.termwidth())
478 desc = util.ellipsis(desc, ui.termwidth())
479 ui.status(_('shelved as %s\n') % name)
479 ui.status(_('shelved as %s\n') % name)
480 hg.update(repo, parent.node())
480 hg.update(repo, parent.node())
481 if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
481 if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
482 repo.dirstate.setbranch(origbranch)
482 repo.dirstate.setbranch(origbranch)
483
483
484 _finishshelve(repo)
484 _finishshelve(repo)
485 finally:
485 finally:
486 _restoreactivebookmark(repo, activebookmark)
486 _restoreactivebookmark(repo, activebookmark)
487 lockmod.release(tr, lock)
487 lockmod.release(tr, lock)
488
488
489 def _isbareshelve(pats, opts):
489 def _isbareshelve(pats, opts):
490 return (not pats
490 return (not pats
491 and not opts.get('interactive', False)
491 and not opts.get('interactive', False)
492 and not opts.get('include', False)
492 and not opts.get('include', False)
493 and not opts.get('exclude', False))
493 and not opts.get('exclude', False))
494
494
495 def _iswctxonnewbranch(repo):
495 def _iswctxonnewbranch(repo):
496 return repo[None].branch() != repo['.'].branch()
496 return repo[None].branch() != repo['.'].branch()
497
497
498 def cleanupcmd(ui, repo):
498 def cleanupcmd(ui, repo):
499 """subcommand that deletes all shelves"""
499 """subcommand that deletes all shelves"""
500
500
501 with repo.wlock():
501 with repo.wlock():
502 for (name, _type) in repo.vfs.readdir(shelvedir):
502 for (name, _type) in repo.vfs.readdir(shelvedir):
503 suffix = name.rsplit('.', 1)[-1]
503 suffix = name.rsplit('.', 1)[-1]
504 if suffix in shelvefileextensions:
504 if suffix in shelvefileextensions:
505 shelvedfile(repo, name).movetobackup()
505 shelvedfile(repo, name).movetobackup()
506 cleanupoldbackups(repo)
506 cleanupoldbackups(repo)
507
507
508 def deletecmd(ui, repo, pats):
508 def deletecmd(ui, repo, pats):
509 """subcommand that deletes a specific shelve"""
509 """subcommand that deletes a specific shelve"""
510 if not pats:
510 if not pats:
511 raise error.Abort(_('no shelved changes specified!'))
511 raise error.Abort(_('no shelved changes specified!'))
512 with repo.wlock():
512 with repo.wlock():
513 try:
513 try:
514 for name in pats:
514 for name in pats:
515 for suffix in shelvefileextensions:
515 for suffix in shelvefileextensions:
516 shfile = shelvedfile(repo, name, suffix)
516 shfile = shelvedfile(repo, name, suffix)
517 # patch file is necessary, as it should
517 # patch file is necessary, as it should
518 # be present for any kind of shelve,
518 # be present for any kind of shelve,
519 # but the .hg file is optional, as in the future we
519 # but the .hg file is optional, as in the future we
520 # will add obsolete shelves which do not create a
520 # will add obsolete shelves which do not create a
521 # bundle
521 # bundle
522 if shfile.exists() or suffix == patchextension:
522 if shfile.exists() or suffix == patchextension:
523 shfile.movetobackup()
523 shfile.movetobackup()
524 cleanupoldbackups(repo)
524 cleanupoldbackups(repo)
525 except OSError as err:
525 except OSError as err:
526 if err.errno != errno.ENOENT:
526 if err.errno != errno.ENOENT:
527 raise
527 raise
528 raise error.Abort(_("shelved change '%s' not found") % name)
528 raise error.Abort(_("shelved change '%s' not found") % name)
529
529
530 def listshelves(repo):
530 def listshelves(repo):
531 """return all shelves in repo as list of (time, filename)"""
531 """return all shelves in repo as list of (time, filename)"""
532 try:
532 try:
533 names = repo.vfs.readdir(shelvedir)
533 names = repo.vfs.readdir(shelvedir)
534 except OSError as err:
534 except OSError as err:
535 if err.errno != errno.ENOENT:
535 if err.errno != errno.ENOENT:
536 raise
536 raise
537 return []
537 return []
538 info = []
538 info = []
539 for (name, _type) in names:
539 for (name, _type) in names:
540 pfx, sfx = name.rsplit('.', 1)
540 pfx, sfx = name.rsplit('.', 1)
541 if not pfx or sfx != patchextension:
541 if not pfx or sfx != patchextension:
542 continue
542 continue
543 st = shelvedfile(repo, name).stat()
543 st = shelvedfile(repo, name).stat()
544 info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
544 info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
545 return sorted(info, reverse=True)
545 return sorted(info, reverse=True)
546
546
547 def listcmd(ui, repo, pats, opts):
547 def listcmd(ui, repo, pats, opts):
548 """subcommand that displays the list of shelves"""
548 """subcommand that displays the list of shelves"""
549 pats = set(pats)
549 pats = set(pats)
550 width = 80
550 width = 80
551 if not ui.plain():
551 if not ui.plain():
552 width = ui.termwidth()
552 width = ui.termwidth()
553 namelabel = 'shelve.newest'
553 namelabel = 'shelve.newest'
554 ui.pager('shelve')
554 ui.pager('shelve')
555 for mtime, name in listshelves(repo):
555 for mtime, name in listshelves(repo):
556 sname = util.split(name)[1]
556 sname = util.split(name)[1]
557 if pats and sname not in pats:
557 if pats and sname not in pats:
558 continue
558 continue
559 ui.write(sname, label=namelabel)
559 ui.write(sname, label=namelabel)
560 namelabel = 'shelve.name'
560 namelabel = 'shelve.name'
561 if ui.quiet:
561 if ui.quiet:
562 ui.write('\n')
562 ui.write('\n')
563 continue
563 continue
564 ui.write(' ' * (16 - len(sname)))
564 ui.write(' ' * (16 - len(sname)))
565 used = 16
565 used = 16
566 age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
566 age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
567 ui.write(age, label='shelve.age')
567 ui.write(age, label='shelve.age')
568 ui.write(' ' * (12 - len(age)))
568 ui.write(' ' * (12 - len(age)))
569 used += 12
569 used += 12
570 with open(name + '.' + patchextension, 'rb') as fp:
570 with open(name + '.' + patchextension, 'rb') as fp:
571 while True:
571 while True:
572 line = fp.readline()
572 line = fp.readline()
573 if not line:
573 if not line:
574 break
574 break
575 if not line.startswith('#'):
575 if not line.startswith('#'):
576 desc = line.rstrip()
576 desc = line.rstrip()
577 if ui.formatted():
577 if ui.formatted():
578 desc = util.ellipsis(desc, width - used)
578 desc = util.ellipsis(desc, width - used)
579 ui.write(desc)
579 ui.write(desc)
580 break
580 break
581 ui.write('\n')
581 ui.write('\n')
582 if not (opts['patch'] or opts['stat']):
582 if not (opts['patch'] or opts['stat']):
583 continue
583 continue
584 difflines = fp.readlines()
584 difflines = fp.readlines()
585 if opts['patch']:
585 if opts['patch']:
586 for chunk, label in patch.difflabel(iter, difflines):
586 for chunk, label in patch.difflabel(iter, difflines):
587 ui.write(chunk, label=label)
587 ui.write(chunk, label=label)
588 if opts['stat']:
588 if opts['stat']:
589 for chunk, label in patch.diffstatui(difflines, width=width):
589 for chunk, label in patch.diffstatui(difflines, width=width):
590 ui.write(chunk, label=label)
590 ui.write(chunk, label=label)
591
591
592 def patchcmds(ui, repo, pats, opts, subcommand):
592 def patchcmds(ui, repo, pats, opts, subcommand):
593 """subcommand that displays shelves"""
593 """subcommand that displays shelves"""
594 if len(pats) == 0:
594 if len(pats) == 0:
595 raise error.Abort(_("--%s expects at least one shelf") % subcommand)
595 raise error.Abort(_("--%s expects at least one shelf") % subcommand)
596
596
597 for shelfname in pats:
597 for shelfname in pats:
598 if not shelvedfile(repo, shelfname, patchextension).exists():
598 if not shelvedfile(repo, shelfname, patchextension).exists():
599 raise error.Abort(_("cannot find shelf %s") % shelfname)
599 raise error.Abort(_("cannot find shelf %s") % shelfname)
600
600
601 listcmd(ui, repo, pats, opts)
601 listcmd(ui, repo, pats, opts)
602
602
603 def checkparents(repo, state):
603 def checkparents(repo, state):
604 """check parent while resuming an unshelve"""
604 """check parent while resuming an unshelve"""
605 if state.parents != repo.dirstate.parents():
605 if state.parents != repo.dirstate.parents():
606 raise error.Abort(_('working directory parents do not match unshelve '
606 raise error.Abort(_('working directory parents do not match unshelve '
607 'state'))
607 'state'))
608
608
609 def pathtofiles(repo, files):
609 def pathtofiles(repo, files):
610 cwd = repo.getcwd()
610 cwd = repo.getcwd()
611 return [repo.pathto(f, cwd) for f in files]
611 return [repo.pathto(f, cwd) for f in files]
612
612
613 def unshelveabort(ui, repo, state, opts):
613 def unshelveabort(ui, repo, state, opts):
614 """subcommand that abort an in-progress unshelve"""
614 """subcommand that abort an in-progress unshelve"""
615 with repo.lock():
615 with repo.lock():
616 try:
616 try:
617 checkparents(repo, state)
617 checkparents(repo, state)
618
618
619 repo.vfs.rename('unshelverebasestate', 'rebasestate')
619 repo.vfs.rename('unshelverebasestate', 'rebasestate')
620 try:
620 try:
621 rebase.rebase(ui, repo, **{
621 rebase.rebase(ui, repo, **{
622 r'abort' : True
622 r'abort' : True
623 })
623 })
624 except Exception:
624 except Exception:
625 repo.vfs.rename('rebasestate', 'unshelverebasestate')
625 repo.vfs.rename('rebasestate', 'unshelverebasestate')
626 raise
626 raise
627
627
628 mergefiles(ui, repo, state.wctx, state.pendingctx)
628 mergefiles(ui, repo, state.wctx, state.pendingctx)
629 repair.strip(ui, repo, state.nodestoremove, backup=False,
629 repair.strip(ui, repo, state.nodestoremove, backup=False,
630 topic='shelve')
630 topic='shelve')
631 finally:
631 finally:
632 shelvedstate.clear(repo)
632 shelvedstate.clear(repo)
633 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
633 ui.warn(_("unshelve of '%s' aborted\n") % state.name)
634
634
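(Editor's note: both the abort path above and the continue path below temporarily rename 'unshelverebasestate' to 'rebasestate' so the nested rebase can find its state, and rename it back if that rebase fails. A hedged standalone sketch of the pattern — only the two file names come from the code, the helper itself is hypothetical:

    import contextlib
    import os

    @contextlib.contextmanager
    def borrowed_statefile(hgdir, saved='unshelverebasestate', live='rebasestate'):
        # expose the saved rebase state under the name the rebase command expects
        os.rename(os.path.join(hgdir, saved), os.path.join(hgdir, live))
        try:
            yield
        except Exception:
            # restore the saved name on failure so --continue/--abort still work
            os.rename(os.path.join(hgdir, live), os.path.join(hgdir, saved))
            raise
)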
635 def mergefiles(ui, repo, wctx, shelvectx):
635 def mergefiles(ui, repo, wctx, shelvectx):
636 """updates to wctx and merges the changes from shelvectx into the
636 """updates to wctx and merges the changes from shelvectx into the
637 dirstate."""
637 dirstate."""
638 with ui.configoverride({('ui', 'quiet'): True}):
638 with ui.configoverride({('ui', 'quiet'): True}):
639 hg.update(repo, wctx.node())
639 hg.update(repo, wctx.node())
640 files = []
640 files = []
641 files.extend(shelvectx.files())
641 files.extend(shelvectx.files())
642 files.extend(shelvectx.parents()[0].files())
642 files.extend(shelvectx.parents()[0].files())
643
643
644 # revert will overwrite unknown files, so move them out of the way
644 # revert will overwrite unknown files, so move them out of the way
645 for file in repo.status(unknown=True).unknown:
645 for file in repo.status(unknown=True).unknown:
646 if file in files:
646 if file in files:
647 util.rename(file, scmutil.origpath(ui, repo, file))
647 util.rename(file, scmutil.origpath(ui, repo, file))
648 ui.pushbuffer(True)
648 ui.pushbuffer(True)
649 cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
649 cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
650 *pathtofiles(repo, files),
650 *pathtofiles(repo, files),
651 **{r'no_backup': True})
651 **{r'no_backup': True})
652 ui.popbuffer()
652 ui.popbuffer()
653
653
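(Editor's note: before the quiet revert, mergefiles moves any unknown working-copy file that the shelved change would overwrite out of the way. A tiny illustration of that move-aside step — shutil is a stand-in for util.rename/scmutil.origpath, names are hypothetical:

    import shutil

    def move_unknown_aside_sketch(unknown_files, files_in_shelve):
        for path in unknown_files:
            if path in files_in_shelve:
                # park the untracked file next to itself, e.g. foo -> foo.orig
                shutil.move(path, path + '.orig')
)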
654 def restorebranch(ui, repo, branchtorestore):
654 def restorebranch(ui, repo, branchtorestore):
655 if branchtorestore and branchtorestore != repo.dirstate.branch():
655 if branchtorestore and branchtorestore != repo.dirstate.branch():
656 repo.dirstate.setbranch(branchtorestore)
656 repo.dirstate.setbranch(branchtorestore)
657 ui.status(_('marked working directory as branch %s\n')
657 ui.status(_('marked working directory as branch %s\n')
658 % branchtorestore)
658 % branchtorestore)
659
659
660 def unshelvecleanup(ui, repo, name, opts):
660 def unshelvecleanup(ui, repo, name, opts):
661 """remove related files after an unshelve"""
661 """remove related files after an unshelve"""
662 if not opts.get('keep'):
662 if not opts.get('keep'):
663 for filetype in shelvefileextensions:
663 for filetype in shelvefileextensions:
664 shfile = shelvedfile(repo, name, filetype)
664 shfile = shelvedfile(repo, name, filetype)
665 if shfile.exists():
665 if shfile.exists():
666 shfile.movetobackup()
666 shfile.movetobackup()
667 cleanupoldbackups(repo)
667 cleanupoldbackups(repo)
668
668
669 def unshelvecontinue(ui, repo, state, opts):
669 def unshelvecontinue(ui, repo, state, opts):
670 """subcommand to continue an in-progress unshelve"""
670 """subcommand to continue an in-progress unshelve"""
671 # We're finishing off a merge. First parent is our original
671 # We're finishing off a merge. First parent is our original
672 # parent, second is the temporary "fake" commit we're unshelving.
672 # parent, second is the temporary "fake" commit we're unshelving.
673 with repo.lock():
673 with repo.lock():
674 checkparents(repo, state)
674 checkparents(repo, state)
675 ms = merge.mergestate.read(repo)
675 ms = merge.mergestate.read(repo)
676 if list(ms.unresolved()):
676 if list(ms.unresolved()):
677 raise error.Abort(
677 raise error.Abort(
678 _("unresolved conflicts, can't continue"),
678 _("unresolved conflicts, can't continue"),
679 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
679 hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
680
680
681 repo.vfs.rename('unshelverebasestate', 'rebasestate')
681 repo.vfs.rename('unshelverebasestate', 'rebasestate')
682 try:
682 try:
683 rebase.rebase(ui, repo, **{
683 rebase.rebase(ui, repo, **{
684 r'continue' : True
684 r'continue' : True
685 })
685 })
686 except Exception:
686 except Exception:
687 repo.vfs.rename('rebasestate', 'unshelverebasestate')
687 repo.vfs.rename('rebasestate', 'unshelverebasestate')
688 raise
688 raise
689
689
690 shelvectx = repo['tip']
690 shelvectx = repo['tip']
691 if state.pendingctx not in shelvectx.parents():
691 if state.pendingctx not in shelvectx.parents():
692 # rebase was a no-op, so it produced no child commit
692 # rebase was a no-op, so it produced no child commit
693 shelvectx = state.pendingctx
693 shelvectx = state.pendingctx
694 else:
694 else:
695 # only strip the shelvectx if the rebase produced it
695 # only strip the shelvectx if the rebase produced it
696 state.nodestoremove.append(shelvectx.node())
696 state.nodestoremove.append(shelvectx.node())
697
697
698 mergefiles(ui, repo, state.wctx, shelvectx)
698 mergefiles(ui, repo, state.wctx, shelvectx)
699 restorebranch(ui, repo, state.branchtorestore)
699 restorebranch(ui, repo, state.branchtorestore)
700
700
701 repair.strip(ui, repo, state.nodestoremove, backup=False,
701 repair.strip(ui, repo, state.nodestoremove, backup=False,
702 topic='shelve')
702 topic='shelve')
703 _restoreactivebookmark(repo, state.activebookmark)
703 _restoreactivebookmark(repo, state.activebookmark)
704 shelvedstate.clear(repo)
704 shelvedstate.clear(repo)
705 unshelvecleanup(ui, repo, state.name, opts)
705 unshelvecleanup(ui, repo, state.name, opts)
706 ui.status(_("unshelve of '%s' complete\n") % state.name)
706 ui.status(_("unshelve of '%s' complete\n") % state.name)
707
707
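(Editor's note: the "rebase was a no-op" test above, and the same test in _rebaserestoredcommit further down, simply asks whether the previous pending context is a parent of the new tip; if not, no child commit was created and the pending context itself is the result. As a standalone sketch with stand-in context objects:

    def resolve_shelvectx_sketch(tip_ctx, pending_ctx):
        if pending_ctx not in tip_ctx.parents():
            # the rebase created nothing on top of pending_ctx; keep it as-is
            return pending_ctx, False   # and there is no extra node to strip
        return tip_ctx, True            # the new tip gets stripped during cleanup
)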
708 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
708 def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
709 """Temporarily commit working copy changes before moving unshelve commit"""
709 """Temporarily commit working copy changes before moving unshelve commit"""
710 # Store pending changes in a commit and remember the added files, in case
710 # Store pending changes in a commit and remember the added files, in case
711 # the shelve contains unknown files that are part of the pending change
711 # the shelve contains unknown files that are part of the pending change
712 s = repo.status()
712 s = repo.status()
713 addedbefore = frozenset(s.added)
713 addedbefore = frozenset(s.added)
714 if not (s.modified or s.added or s.removed):
714 if not (s.modified or s.added or s.removed):
715 return tmpwctx, addedbefore
715 return tmpwctx, addedbefore
716 ui.status(_("temporarily committing pending changes "
716 ui.status(_("temporarily committing pending changes "
717 "(restore with 'hg unshelve --abort')\n"))
717 "(restore with 'hg unshelve --abort')\n"))
718 commitfunc = getcommitfunc(extra=None, interactive=False,
718 commitfunc = getcommitfunc(extra=None, interactive=False,
719 editor=False)
719 editor=False)
720 tempopts = {}
720 tempopts = {}
721 tempopts['message'] = "pending changes temporary commit"
721 tempopts['message'] = "pending changes temporary commit"
722 tempopts['date'] = opts.get('date')
722 tempopts['date'] = opts.get('date')
723 with ui.configoverride({('ui', 'quiet'): True}):
723 with ui.configoverride({('ui', 'quiet'): True}):
724 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
724 node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
725 tmpwctx = repo[node]
725 tmpwctx = repo[node]
726 return tmpwctx, addedbefore
726 return tmpwctx, addedbefore
727
727
728 def _unshelverestorecommit(ui, repo, basename):
728 def _unshelverestorecommit(ui, repo, basename):
729 """Recreate commit in the repository during the unshelve"""
729 """Recreate commit in the repository during the unshelve"""
730 with ui.configoverride({('ui', 'quiet'): True}):
730 with ui.configoverride({('ui', 'quiet'): True}):
731 shelvedfile(repo, basename, 'hg').applybundle()
731 shelvedfile(repo, basename, 'hg').applybundle()
732 shelvectx = repo['tip']
732 shelvectx = repo['tip']
733 return repo, shelvectx
733 return repo, shelvectx
734
734
735 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
735 def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
736 tmpwctx, shelvectx, branchtorestore,
736 tmpwctx, shelvectx, branchtorestore,
737 activebookmark):
737 activebookmark):
738 """Rebase restored commit from its original location to a destination"""
738 """Rebase restored commit from its original location to a destination"""
739 # If the shelve is not immediately on top of the commit
739 # If the shelve is not immediately on top of the commit
740 # we'll be merging with, rebase it to be on top.
740 # we'll be merging with, rebase it to be on top.
741 if tmpwctx.node() == shelvectx.parents()[0].node():
741 if tmpwctx.node() == shelvectx.parents()[0].node():
742 return shelvectx
742 return shelvectx
743
743
744 ui.status(_('rebasing shelved changes\n'))
744 ui.status(_('rebasing shelved changes\n'))
745 try:
745 try:
746 rebase.rebase(ui, repo, **{
746 rebase.rebase(ui, repo, **{
747 r'rev': [shelvectx.rev()],
747 r'rev': [shelvectx.rev()],
748 r'dest': str(tmpwctx.rev()),
748 r'dest': str(tmpwctx.rev()),
749 r'keep': True,
749 r'keep': True,
750 r'tool': opts.get('tool', ''),
750 r'tool': opts.get('tool', ''),
751 })
751 })
752 except error.InterventionRequired:
752 except error.InterventionRequired:
753 tr.close()
753 tr.close()
754
754
755 nodestoremove = [repo.changelog.node(rev)
755 nodestoremove = [repo.changelog.node(rev)
756 for rev in xrange(oldtiprev, len(repo))]
756 for rev in xrange(oldtiprev, len(repo))]
757 shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
757 shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove,
758 branchtorestore, opts.get('keep'), activebookmark)
758 branchtorestore, opts.get('keep'), activebookmark)
759
759
760 repo.vfs.rename('rebasestate', 'unshelverebasestate')
760 repo.vfs.rename('rebasestate', 'unshelverebasestate')
761 raise error.InterventionRequired(
761 raise error.InterventionRequired(
762 _("unresolved conflicts (see 'hg resolve', then "
762 _("unresolved conflicts (see 'hg resolve', then "
763 "'hg unshelve --continue')"))
763 "'hg unshelve --continue')"))
764
764
765 # refresh ctx after rebase completes
765 # refresh ctx after rebase completes
766 shelvectx = repo['tip']
766 shelvectx = repo['tip']
767
767
768 if tmpwctx not in shelvectx.parents():
768 if tmpwctx not in shelvectx.parents():
769 # rebase was a no-op, so it produced no child commit
769 # rebase was a no-op, so it produced no child commit
770 shelvectx = tmpwctx
770 shelvectx = tmpwctx
771 return shelvectx
771 return shelvectx
772
772
773 def _forgetunknownfiles(repo, shelvectx, addedbefore):
773 def _forgetunknownfiles(repo, shelvectx, addedbefore):
774 # Forget any files that were unknown before the shelve, unknown before
774 # Forget any files that were unknown before the shelve, unknown before
775 # unshelve started, but are now added.
775 # unshelve started, but are now added.
776 shelveunknown = shelvectx.extra().get('shelve_unknown')
776 shelveunknown = shelvectx.extra().get('shelve_unknown')
777 if not shelveunknown:
777 if not shelveunknown:
778 return
778 return
779 shelveunknown = frozenset(shelveunknown.split('\0'))
779 shelveunknown = frozenset(shelveunknown.split('\0'))
780 addedafter = frozenset(repo.status().added)
780 addedafter = frozenset(repo.status().added)
781 toforget = (addedafter & shelveunknown) - addedbefore
781 toforget = (addedafter & shelveunknown) - addedbefore
782 repo[None].forget(toforget)
782 repo[None].forget(toforget)
783
783
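(Editor's note: the forget step above is plain set algebra: of everything now reported as added, keep only what the shelve recorded as previously unknown, and drop whatever was already added before the unshelve began. Equivalently, with ordinary sets and hypothetical inputs:

    def files_to_forget_sketch(added_after, shelve_unknown, added_before):
        return (set(added_after) & set(shelve_unknown)) - set(added_before)

    # e.g. files_to_forget_sketch(['a', 'b'], ['b'], []) == {'b'}
)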
784 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
784 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
785 _restoreactivebookmark(repo, activebookmark)
785 _restoreactivebookmark(repo, activebookmark)
786 # The transaction aborting will strip all the commits for us,
786 # The transaction aborting will strip all the commits for us,
787 # but it doesn't update the inmemory structures, so addchangegroup
787 # but it doesn't update the inmemory structures, so addchangegroup
788 # hooks still fire and try to operate on the missing commits.
788 # hooks still fire and try to operate on the missing commits.
789 # Clean up manually to prevent this.
789 # Clean up manually to prevent this.
790 repo.unfiltered().changelog.strip(oldtiprev, tr)
790 repo.unfiltered().changelog.strip(oldtiprev, tr)
791 _aborttransaction(repo)
791 _aborttransaction(repo)
792
792
793 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
793 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
794 """Check potential problems which may result from working
794 """Check potential problems which may result from working
795 copy having untracked changes."""
795 copy having untracked changes."""
796 wcdeleted = set(repo.status().deleted)
796 wcdeleted = set(repo.status().deleted)
797 shelvetouched = set(shelvectx.files())
797 shelvetouched = set(shelvectx.files())
798 intersection = wcdeleted.intersection(shelvetouched)
798 intersection = wcdeleted.intersection(shelvetouched)
799 if intersection:
799 if intersection:
800 m = _("shelved change touches missing files")
800 m = _("shelved change touches missing files")
801 hint = _("run hg status to see which files are missing")
801 hint = _("run hg status to see which files are missing")
802 raise error.Abort(m, hint=hint)
802 raise error.Abort(m, hint=hint)
803
803
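(Editor's note: _checkunshelveuntrackedproblems is likewise a set intersection between files the working copy reports as deleted and files the shelved commit touches; any overlap aborts the unshelve. A standalone sketch — the exception type and names are illustrative:

    def check_missing_files_sketch(deleted_in_wc, touched_by_shelve):
        clash = set(deleted_in_wc) & set(touched_by_shelve)
        if clash:
            raise RuntimeError("shelved change touches missing files: %s"
                               % ", ".join(sorted(clash)))
)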
804 @command('unshelve',
804 @command('unshelve',
805 [('a', 'abort', None,
805 [('a', 'abort', None,
806 _('abort an incomplete unshelve operation')),
806 _('abort an incomplete unshelve operation')),
807 ('c', 'continue', None,
807 ('c', 'continue', None,
808 _('continue an incomplete unshelve operation')),
808 _('continue an incomplete unshelve operation')),
809 ('k', 'keep', None,
809 ('k', 'keep', None,
810 _('keep shelve after unshelving')),
810 _('keep shelve after unshelving')),
811 ('n', 'name', '',
811 ('n', 'name', '',
812 _('restore shelved change with given name'), _('NAME')),
812 _('restore shelved change with given name'), _('NAME')),
813 ('t', 'tool', '', _('specify merge tool')),
813 ('t', 'tool', '', _('specify merge tool')),
814 ('', 'date', '',
814 ('', 'date', '',
815 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
815 _('set date for temporary commits (DEPRECATED)'), _('DATE'))],
816 _('hg unshelve [[-n] SHELVED]'))
816 _('hg unshelve [[-n] SHELVED]'))
817 def unshelve(ui, repo, *shelved, **opts):
817 def unshelve(ui, repo, *shelved, **opts):
818 """restore a shelved change to the working directory
818 """restore a shelved change to the working directory
819
819
820 This command accepts an optional name of a shelved change to
820 This command accepts an optional name of a shelved change to
821 restore. If none is given, the most recent shelved change is used.
821 restore. If none is given, the most recent shelved change is used.
822
822
823 If a shelved change is applied successfully, the bundle that
823 If a shelved change is applied successfully, the bundle that
824 contains the shelved changes is moved to a backup location
824 contains the shelved changes is moved to a backup location
825 (.hg/shelve-backup).
825 (.hg/shelve-backup).
826
826
827 Since you can restore a shelved change on top of an arbitrary
827 Since you can restore a shelved change on top of an arbitrary
828 commit, it is possible that unshelving will result in a conflict
828 commit, it is possible that unshelving will result in a conflict
829 between your changes and the commits you are unshelving onto. If
829 between your changes and the commits you are unshelving onto. If
830 this occurs, you must resolve the conflict, then use
830 this occurs, you must resolve the conflict, then use
831 ``--continue`` to complete the unshelve operation. (The bundle
831 ``--continue`` to complete the unshelve operation. (The bundle
832 will not be moved until you successfully complete the unshelve.)
832 will not be moved until you successfully complete the unshelve.)
833
833
834 (Alternatively, you can use ``--abort`` to abandon an unshelve
834 (Alternatively, you can use ``--abort`` to abandon an unshelve
835 that causes a conflict. This reverts the unshelved changes, and
835 that causes a conflict. This reverts the unshelved changes, and
836 leaves the bundle in place.)
836 leaves the bundle in place.)
837
837
838 If a bare shelved change (when no files are specified, without the
838 If a bare shelved change (when no files are specified, without the
839 interactive, include and exclude options) was made on a newly created
839 interactive, include and exclude options) was made on a newly created
840 branch, unshelving restores that branch information to the working directory.
840 branch, unshelving restores that branch information to the working directory.
841
841
842 After a successful unshelve, the shelved changes are stored in a
842 After a successful unshelve, the shelved changes are stored in a
843 backup directory. Only the N most recent backups are kept. N
843 backup directory. Only the N most recent backups are kept. N
844 defaults to 10 but can be overridden using the ``shelve.maxbackups``
844 defaults to 10 but can be overridden using the ``shelve.maxbackups``
845 configuration option.
845 configuration option.
846
846
847 .. container:: verbose
847 .. container:: verbose
848
848
849 Timestamps in seconds are used to decide the order of backups. If
849 Timestamps in seconds are used to decide the order of backups. If
850 identical timestamps make the exact order ambiguous, more than
850 identical timestamps make the exact order ambiguous, more than
851 ``maxbackups`` backups are kept, for safety.
851 ``maxbackups`` backups are kept, for safety.
852 """
852 """
853 with repo.wlock():
853 with repo.wlock():
854 return _dounshelve(ui, repo, *shelved, **opts)
854 return _dounshelve(ui, repo, *shelved, **opts)
855
855
856 def _dounshelve(ui, repo, *shelved, **opts):
856 def _dounshelve(ui, repo, *shelved, **opts):
857 opts = pycompat.byteskwargs(opts)
857 opts = pycompat.byteskwargs(opts)
858 abortf = opts.get('abort')
858 abortf = opts.get('abort')
859 continuef = opts.get('continue')
859 continuef = opts.get('continue')
860 if not abortf and not continuef:
860 if not abortf and not continuef:
861 cmdutil.checkunfinished(repo)
861 cmdutil.checkunfinished(repo)
862 shelved = list(shelved)
862 shelved = list(shelved)
863 if opts.get("name"):
863 if opts.get("name"):
864 shelved.append(opts["name"])
864 shelved.append(opts["name"])
865
865
866 if abortf or continuef:
866 if abortf or continuef:
867 if abortf and continuef:
867 if abortf and continuef:
868 raise error.Abort(_('cannot use both abort and continue'))
868 raise error.Abort(_('cannot use both abort and continue'))
869 if shelved:
869 if shelved:
870 raise error.Abort(_('cannot combine abort/continue with '
870 raise error.Abort(_('cannot combine abort/continue with '
871 'naming a shelved change'))
871 'naming a shelved change'))
872 if abortf and opts.get('tool', False):
872 if abortf and opts.get('tool', False):
873 ui.warn(_('tool option will be ignored\n'))
873 ui.warn(_('tool option will be ignored\n'))
874
874
875 try:
875 try:
876 state = shelvedstate.load(repo)
876 state = shelvedstate.load(repo)
877 if opts.get('keep') is None:
877 if opts.get('keep') is None:
878 opts['keep'] = state.keep
878 opts['keep'] = state.keep
879 except IOError as err:
879 except IOError as err:
880 if err.errno != errno.ENOENT:
880 if err.errno != errno.ENOENT:
881 raise
881 raise
882 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
882 cmdutil.wrongtooltocontinue(repo, _('unshelve'))
883 except error.CorruptedState as err:
883 except error.CorruptedState as err:
884 ui.debug(str(err) + '\n')
884 ui.debug(str(err) + '\n')
885 if continuef:
885 if continuef:
886 msg = _('corrupted shelved state file')
886 msg = _('corrupted shelved state file')
887 hint = _('please run hg unshelve --abort to abort unshelve '
887 hint = _('please run hg unshelve --abort to abort unshelve '
888 'operation')
888 'operation')
889 raise error.Abort(msg, hint=hint)
889 raise error.Abort(msg, hint=hint)
890 elif abortf:
890 elif abortf:
891 msg = _('could not read shelved state file, your working copy '
891 msg = _('could not read shelved state file, your working copy '
892 'may be in an unexpected state\nplease update to some '
892 'may be in an unexpected state\nplease update to some '
893 'commit\n')
893 'commit\n')
894 ui.warn(msg)
894 ui.warn(msg)
895 shelvedstate.clear(repo)
895 shelvedstate.clear(repo)
896 return
896 return
897
897
898 if abortf:
898 if abortf:
899 return unshelveabort(ui, repo, state, opts)
899 return unshelveabort(ui, repo, state, opts)
900 elif continuef:
900 elif continuef:
901 return unshelvecontinue(ui, repo, state, opts)
901 return unshelvecontinue(ui, repo, state, opts)
902 elif len(shelved) > 1:
902 elif len(shelved) > 1:
903 raise error.Abort(_('can only unshelve one change at a time'))
903 raise error.Abort(_('can only unshelve one change at a time'))
904 elif not shelved:
904 elif not shelved:
905 shelved = listshelves(repo)
905 shelved = listshelves(repo)
906 if not shelved:
906 if not shelved:
907 raise error.Abort(_('no shelved changes to apply!'))
907 raise error.Abort(_('no shelved changes to apply!'))
908 basename = util.split(shelved[0][1])[1]
908 basename = util.split(shelved[0][1])[1]
909 ui.status(_("unshelving change '%s'\n") % basename)
909 ui.status(_("unshelving change '%s'\n") % basename)
910 else:
910 else:
911 basename = shelved[0]
911 basename = shelved[0]
912
912
913 if not shelvedfile(repo, basename, patchextension).exists():
913 if not shelvedfile(repo, basename, patchextension).exists():
914 raise error.Abort(_("shelved change '%s' not found") % basename)
914 raise error.Abort(_("shelved change '%s' not found") % basename)
915
915
916 lock = tr = None
916 lock = tr = None
917 try:
917 try:
918 lock = repo.lock()
918 lock = repo.lock()
919 tr = repo.transaction('unshelve', report=lambda x: None)
919 tr = repo.transaction('unshelve', report=lambda x: None)
920 oldtiprev = len(repo)
920 oldtiprev = len(repo)
921
921
922 pctx = repo['.']
922 pctx = repo['.']
923 tmpwctx = pctx
923 tmpwctx = pctx
924 # The goal is to have a commit structure like so:
924 # The goal is to have a commit structure like so:
925 # ...-> pctx -> tmpwctx -> shelvectx
925 # ...-> pctx -> tmpwctx -> shelvectx
926 # where tmpwctx is an optional commit with the user's pending changes
926 # where tmpwctx is an optional commit with the user's pending changes
927 # and shelvectx is the unshelved changes. Then we merge it all down
927 # and shelvectx is the unshelved changes. Then we merge it all down
928 # to the original pctx.
928 # to the original pctx.
929
929
930 activebookmark = _backupactivebookmark(repo)
930 activebookmark = _backupactivebookmark(repo)
931 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
931 overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
932 with ui.configoverride(overrides, 'unshelve'):
932 with ui.configoverride(overrides, 'unshelve'):
933 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
933 tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
934 tmpwctx)
934 tmpwctx)
935 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
935 repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
936 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
936 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
937 branchtorestore = ''
937 branchtorestore = ''
938 if shelvectx.branch() != shelvectx.p1().branch():
938 if shelvectx.branch() != shelvectx.p1().branch():
939 branchtorestore = shelvectx.branch()
939 branchtorestore = shelvectx.branch()
940
940
941 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
941 shelvectx = _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev,
942 basename, pctx, tmpwctx,
942 basename, pctx, tmpwctx,
943 shelvectx, branchtorestore,
943 shelvectx, branchtorestore,
944 activebookmark)
944 activebookmark)
945 mergefiles(ui, repo, pctx, shelvectx)
945 mergefiles(ui, repo, pctx, shelvectx)
946 restorebranch(ui, repo, branchtorestore)
946 restorebranch(ui, repo, branchtorestore)
947 _forgetunknownfiles(repo, shelvectx, addedbefore)
947 _forgetunknownfiles(repo, shelvectx, addedbefore)
948
948
949 shelvedstate.clear(repo)
949 shelvedstate.clear(repo)
950 _finishunshelve(repo, oldtiprev, tr, activebookmark)
950 _finishunshelve(repo, oldtiprev, tr, activebookmark)
951 unshelvecleanup(ui, repo, basename, opts)
951 unshelvecleanup(ui, repo, basename, opts)
952 finally:
952 finally:
953 if tr:
953 if tr:
954 tr.release()
954 tr.release()
955 lockmod.release(lock)
955 lockmod.release(lock)
956
956
957 @command('shelve',
957 @command('shelve',
958 [('A', 'addremove', None,
958 [('A', 'addremove', None,
959 _('mark new/missing files as added/removed before shelving')),
959 _('mark new/missing files as added/removed before shelving')),
960 ('u', 'unknown', None,
960 ('u', 'unknown', None,
961 _('store unknown files in the shelve')),
961 _('store unknown files in the shelve')),
962 ('', 'cleanup', None,
962 ('', 'cleanup', None,
963 _('delete all shelved changes')),
963 _('delete all shelved changes')),
964 ('', 'date', '',
964 ('', 'date', '',
965 _('shelve with the specified commit date'), _('DATE')),
965 _('shelve with the specified commit date'), _('DATE')),
966 ('d', 'delete', None,
966 ('d', 'delete', None,
967 _('delete the named shelved change(s)')),
967 _('delete the named shelved change(s)')),
968 ('e', 'edit', False,
968 ('e', 'edit', False,
969 _('invoke editor on commit messages')),
969 _('invoke editor on commit messages')),
970 ('l', 'list', None,
970 ('l', 'list', None,
971 _('list current shelves')),
971 _('list current shelves')),
972 ('m', 'message', '',
972 ('m', 'message', '',
973 _('use text as shelve message'), _('TEXT')),
973 _('use text as shelve message'), _('TEXT')),
974 ('n', 'name', '',
974 ('n', 'name', '',
975 _('use the given name for the shelved commit'), _('NAME')),
975 _('use the given name for the shelved commit'), _('NAME')),
976 ('p', 'patch', None,
976 ('p', 'patch', None,
977 _('show patch')),
977 _('show patch')),
978 ('i', 'interactive', None,
978 ('i', 'interactive', None,
979 _('interactive mode, only works while creating a shelve')),
979 _('interactive mode, only works while creating a shelve')),
980 ('', 'stat', None,
980 ('', 'stat', None,
981 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
981 _('output diffstat-style summary of changes'))] + cmdutil.walkopts,
982 _('hg shelve [OPTION]... [FILE]...'))
982 _('hg shelve [OPTION]... [FILE]...'))
983 def shelvecmd(ui, repo, *pats, **opts):
983 def shelvecmd(ui, repo, *pats, **opts):
984 '''save and set aside changes from the working directory
984 '''save and set aside changes from the working directory
985
985
986 Shelving takes files that "hg status" reports as not clean, saves
986 Shelving takes files that "hg status" reports as not clean, saves
987 the modifications to a bundle (a shelved change), and reverts the
987 the modifications to a bundle (a shelved change), and reverts the
988 files so that their state in the working directory becomes clean.
988 files so that their state in the working directory becomes clean.
989
989
990 To restore these changes to the working directory, use "hg
990 To restore these changes to the working directory, use "hg
991 unshelve"; this will work even if you switch to a different
991 unshelve"; this will work even if you switch to a different
992 commit.
992 commit.
993
993
994 When no files are specified, "hg shelve" saves all not-clean
994 When no files are specified, "hg shelve" saves all not-clean
995 files. If specific files or directories are named, only changes to
995 files. If specific files or directories are named, only changes to
996 those files are shelved.
996 those files are shelved.
997
997
998 In a bare shelve (when no files are specified, without the interactive,
998 In a bare shelve (when no files are specified, without the interactive,
999 include and exclude options), shelving remembers whether the working
999 include and exclude options), shelving remembers whether the working
1000 directory was on a newly created branch, in other words whether the
1000 directory was on a newly created branch, in other words whether the
1001 working directory was on a different branch than its first parent. In
1001 working directory was on a different branch than its first parent. In
1002 this situation, unshelving restores that branch information to the working directory.
1002 this situation, unshelving restores that branch information to the working directory.
1003
1003
1004 Each shelved change has a name that makes it easier to find later.
1004 Each shelved change has a name that makes it easier to find later.
1005 The name of a shelved change defaults to being based on the active
1005 The name of a shelved change defaults to being based on the active
1006 bookmark, or if there is no active bookmark, the current named
1006 bookmark, or if there is no active bookmark, the current named
1007 branch. To specify a different name, use ``--name``.
1007 branch. To specify a different name, use ``--name``.
1008
1008
1009 To see a list of existing shelved changes, use the ``--list``
1009 To see a list of existing shelved changes, use the ``--list``
1010 option. For each shelved change, this will print its name, age,
1010 option. For each shelved change, this will print its name, age,
1011 and description; use ``--patch`` or ``--stat`` for more details.
1011 and description; use ``--patch`` or ``--stat`` for more details.
1012
1012
1013 To delete specific shelved changes, use ``--delete``. To delete
1013 To delete specific shelved changes, use ``--delete``. To delete
1014 all shelved changes, use ``--cleanup``.
1014 all shelved changes, use ``--cleanup``.
1015 '''
1015 '''
1016 opts = pycompat.byteskwargs(opts)
1016 opts = pycompat.byteskwargs(opts)
1017 allowables = [
1017 allowables = [
1018 ('addremove', {'create'}), # 'create' is pseudo action
1018 ('addremove', {'create'}), # 'create' is pseudo action
1019 ('unknown', {'create'}),
1019 ('unknown', {'create'}),
1020 ('cleanup', {'cleanup'}),
1020 ('cleanup', {'cleanup'}),
1021 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1021 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
1022 ('delete', {'delete'}),
1022 ('delete', {'delete'}),
1023 ('edit', {'create'}),
1023 ('edit', {'create'}),
1024 ('list', {'list'}),
1024 ('list', {'list'}),
1025 ('message', {'create'}),
1025 ('message', {'create'}),
1026 ('name', {'create'}),
1026 ('name', {'create'}),
1027 ('patch', {'patch', 'list'}),
1027 ('patch', {'patch', 'list'}),
1028 ('stat', {'stat', 'list'}),
1028 ('stat', {'stat', 'list'}),
1029 ]
1029 ]
1030 def checkopt(opt):
1030 def checkopt(opt):
1031 if opts.get(opt):
1031 if opts.get(opt):
1032 for i, allowable in allowables:
1032 for i, allowable in allowables:
1033 if opts[i] and opt not in allowable:
1033 if opts[i] and opt not in allowable:
1034 raise error.Abort(_("options '--%s' and '--%s' may not be "
1034 raise error.Abort(_("options '--%s' and '--%s' may not be "
1035 "used together") % (opt, i))
1035 "used together") % (opt, i))
1036 return True
1036 return True
1037 if checkopt('cleanup'):
1037 if checkopt('cleanup'):
1038 if pats:
1038 if pats:
1039 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1039 raise error.Abort(_("cannot specify names when using '--cleanup'"))
1040 return cleanupcmd(ui, repo)
1040 return cleanupcmd(ui, repo)
1041 elif checkopt('delete'):
1041 elif checkopt('delete'):
1042 return deletecmd(ui, repo, pats)
1042 return deletecmd(ui, repo, pats)
1043 elif checkopt('list'):
1043 elif checkopt('list'):
1044 return listcmd(ui, repo, pats, opts)
1044 return listcmd(ui, repo, pats, opts)
1045 elif checkopt('patch'):
1045 elif checkopt('patch'):
1046 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1046 return patchcmds(ui, repo, pats, opts, subcommand='patch')
1047 elif checkopt('stat'):
1047 elif checkopt('stat'):
1048 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1048 return patchcmds(ui, repo, pats, opts, subcommand='stat')
1049 else:
1049 else:
1050 return createcmd(ui, repo, pats, opts)
1050 return createcmd(ui, repo, pats, opts)
1051
1051
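(Editor's note: the checkopt/allowables table in shelvecmd encodes which options are compatible with which shelve "action": for the requested action, every other enabled option must list that action in its allowed set, otherwise the command aborts. A compact standalone sketch of the same check — table trimmed, names illustrative:

    ALLOWABLES = {
        'cleanup': {'cleanup'},
        'delete':  {'delete'},
        'list':    {'list'},
        'patch':   {'patch', 'list'},
        'stat':    {'stat', 'list'},
        'message': {'create'},
        'name':    {'create'},
    }

    def check_compatible_sketch(opts, action):
        for opt, allowed in ALLOWABLES.items():
            if opts.get(opt) and action not in allowed:
                raise ValueError("options '--%s' and '--%s' may not be used together"
                                 % (action, opt))

    # e.g. check_compatible_sketch({'patch': True, 'list': True}, 'list') passes,
    # while check_compatible_sketch({'patch': True, 'delete': True}, 'delete') aborts.
)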
1052 def extsetup(ui):
1052 def extsetup(ui):
1053 cmdutil.unfinishedstates.append(
1053 cmdutil.unfinishedstates.append(
1054 [shelvedstate._filename, False, False,
1054 [shelvedstate._filename, False, False,
1055 _('unshelve already in progress'),
1055 _('unshelve already in progress'),
1056 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1056 _("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
1057 cmdutil.afterresolvedstates.append(
1057 cmdutil.afterresolvedstates.append(
1058 [shelvedstate._filename, _('hg unshelve --continue')])
1058 [shelvedstate._filename, _('hg unshelve --continue')])
@@ -1,490 +1,491
1 # verify.py - repository integrity checking for Mercurial
1 # verify.py - repository integrity checking for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11
11
12 from .i18n import _
12 from .i18n import _
13 from .node import (
13 from .node import (
14 nullid,
14 nullid,
15 short,
15 short,
16 )
16 )
17
17
18 from . import (
18 from . import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlog,
21 revlog,
22 scmutil,
22 scmutil,
23 util,
23 util,
24 )
24 )
25
25
26 def verify(repo):
26 def verify(repo):
27 with repo.lock():
27 with repo.lock():
28 return verifier(repo).verify()
28 return verifier(repo).verify()
29
29
30 def _normpath(f):
30 def _normpath(f):
31 # under hg < 2.4, convert didn't sanitize paths properly, so a
31 # under hg < 2.4, convert didn't sanitize paths properly, so a
32 # converted repo may contain repeated slashes
32 # converted repo may contain repeated slashes
33 while '//' in f:
33 while '//' in f:
34 f = f.replace('//', '/')
34 f = f.replace('//', '/')
35 return f
35 return f
36
36
37 class verifier(object):
37 class verifier(object):
38 # The match argument is always None in hg core, but e.g. the narrowhg
38 # The match argument is always None in hg core, but e.g. the narrowhg
39 # extension will pass in a matcher here.
39 # extension will pass in a matcher here.
40 def __init__(self, repo, match=None):
40 def __init__(self, repo, match=None):
41 self.repo = repo.unfiltered()
41 self.repo = repo.unfiltered()
42 self.ui = repo.ui
42 self.ui = repo.ui
43 self.match = match or scmutil.matchall(repo)
43 self.match = match or scmutil.matchall(repo)
44 self.badrevs = set()
44 self.badrevs = set()
45 self.errors = 0
45 self.errors = 0
46 self.warnings = 0
46 self.warnings = 0
47 self.havecl = len(repo.changelog) > 0
47 self.havecl = len(repo.changelog) > 0
48 self.havemf = len(repo.manifestlog._revlog) > 0
48 self.havemf = len(repo.manifestlog._revlog) > 0
49 self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
49 self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
50 self.lrugetctx = util.lrucachefunc(repo.changectx)
50 self.lrugetctx = util.lrucachefunc(repo.changectx)
51 self.refersmf = False
51 self.refersmf = False
52 self.fncachewarned = False
52 self.fncachewarned = False
53 # developer config: verify.skipflags
53 # developer config: verify.skipflags
54 self.skipflags = repo.ui.configint('verify', 'skipflags')
54 self.skipflags = repo.ui.configint('verify', 'skipflags')
55
55
56 def warn(self, msg):
56 def warn(self, msg):
57 self.ui.warn(msg + "\n")
57 self.ui.warn(msg + "\n")
58 self.warnings += 1
58 self.warnings += 1
59
59
60 def err(self, linkrev, msg, filename=None):
60 def err(self, linkrev, msg, filename=None):
61 if linkrev is not None:
61 if linkrev is not None:
62 self.badrevs.add(linkrev)
62 self.badrevs.add(linkrev)
63 linkrev = "%d" % linkrev
63 else:
64 else:
64 linkrev = '?'
65 linkrev = '?'
65 msg = "%s: %s" % (linkrev, msg)
66 msg = "%s: %s" % (linkrev, msg)
66 if filename:
67 if filename:
67 msg = "%s@%s" % (filename, msg)
68 msg = "%s@%s" % (filename, msg)
68 self.ui.warn(" " + msg + "\n")
69 self.ui.warn(" " + msg + "\n")
69 self.errors += 1
70 self.errors += 1
70
71
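(Editor's note: the hunk just above is the actual change in this changeset: on Python 3, str(linkrev) would produce a unicode string, while the message here is built from and written out as bytes, so the integer is formatted with "%d" instead; bytes %-formatting has been available since Python 3.5. A quick illustration, assuming a plain Python 3 interpreter:

    linkrev = 36203
    str(linkrev)       # '36203'  -> a unicode str on Python 3
    b"%d" % linkrev    # b'36203' -> bytes, which the bytes-only ui output expects
    # (in Mercurial's own source, a plain "%d" literal is already bytes on Python 3)
)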
71 def exc(self, linkrev, msg, inst, filename=None):
72 def exc(self, linkrev, msg, inst, filename=None):
72 if not str(inst):
73 if not str(inst):
73 inst = repr(inst)
74 inst = repr(inst)
74 self.err(linkrev, "%s: %s" % (msg, inst), filename)
75 self.err(linkrev, "%s: %s" % (msg, inst), filename)
75
76
76 def checklog(self, obj, name, linkrev):
77 def checklog(self, obj, name, linkrev):
77 if not len(obj) and (self.havecl or self.havemf):
78 if not len(obj) and (self.havecl or self.havemf):
78 self.err(linkrev, _("empty or missing %s") % name)
79 self.err(linkrev, _("empty or missing %s") % name)
79 return
80 return
80
81
81 d = obj.checksize()
82 d = obj.checksize()
82 if d[0]:
83 if d[0]:
83 self.err(None, _("data length off by %d bytes") % d[0], name)
84 self.err(None, _("data length off by %d bytes") % d[0], name)
84 if d[1]:
85 if d[1]:
85 self.err(None, _("index contains %d extra bytes") % d[1], name)
86 self.err(None, _("index contains %d extra bytes") % d[1], name)
86
87
87 if obj.version != revlog.REVLOGV0:
88 if obj.version != revlog.REVLOGV0:
88 if not self.revlogv1:
89 if not self.revlogv1:
89 self.warn(_("warning: `%s' uses revlog format 1") % name)
90 self.warn(_("warning: `%s' uses revlog format 1") % name)
90 elif self.revlogv1:
91 elif self.revlogv1:
91 self.warn(_("warning: `%s' uses revlog format 0") % name)
92 self.warn(_("warning: `%s' uses revlog format 0") % name)
92
93
93 def checkentry(self, obj, i, node, seen, linkrevs, f):
94 def checkentry(self, obj, i, node, seen, linkrevs, f):
94 lr = obj.linkrev(obj.rev(node))
95 lr = obj.linkrev(obj.rev(node))
95 if lr < 0 or (self.havecl and lr not in linkrevs):
96 if lr < 0 or (self.havecl and lr not in linkrevs):
96 if lr < 0 or lr >= len(self.repo.changelog):
97 if lr < 0 or lr >= len(self.repo.changelog):
97 msg = _("rev %d points to nonexistent changeset %d")
98 msg = _("rev %d points to nonexistent changeset %d")
98 else:
99 else:
99 msg = _("rev %d points to unexpected changeset %d")
100 msg = _("rev %d points to unexpected changeset %d")
100 self.err(None, msg % (i, lr), f)
101 self.err(None, msg % (i, lr), f)
101 if linkrevs:
102 if linkrevs:
102 if f and len(linkrevs) > 1:
103 if f and len(linkrevs) > 1:
103 try:
104 try:
104 # attempt to filter down to real linkrevs
105 # attempt to filter down to real linkrevs
105 linkrevs = [l for l in linkrevs
106 linkrevs = [l for l in linkrevs
106 if self.lrugetctx(l)[f].filenode() == node]
107 if self.lrugetctx(l)[f].filenode() == node]
107 except Exception:
108 except Exception:
108 pass
109 pass
109 self.warn(_(" (expected %s)") % " ".join
110 self.warn(_(" (expected %s)") % " ".join
110 (map(pycompat.bytestr, linkrevs)))
111 (map(pycompat.bytestr, linkrevs)))
111 lr = None # can't be trusted
112 lr = None # can't be trusted
112
113
113 try:
114 try:
114 p1, p2 = obj.parents(node)
115 p1, p2 = obj.parents(node)
115 if p1 not in seen and p1 != nullid:
116 if p1 not in seen and p1 != nullid:
116 self.err(lr, _("unknown parent 1 %s of %s") %
117 self.err(lr, _("unknown parent 1 %s of %s") %
117 (short(p1), short(node)), f)
118 (short(p1), short(node)), f)
118 if p2 not in seen and p2 != nullid:
119 if p2 not in seen and p2 != nullid:
119 self.err(lr, _("unknown parent 2 %s of %s") %
120 self.err(lr, _("unknown parent 2 %s of %s") %
120 (short(p2), short(node)), f)
121 (short(p2), short(node)), f)
121 except Exception as inst:
122 except Exception as inst:
122 self.exc(lr, _("checking parents of %s") % short(node), inst, f)
123 self.exc(lr, _("checking parents of %s") % short(node), inst, f)
123
124
124 if node in seen:
125 if node in seen:
125 self.err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
126 self.err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
126 seen[node] = i
127 seen[node] = i
127 return lr
128 return lr
128
129
129 def verify(self):
130 def verify(self):
130 repo = self.repo
131 repo = self.repo
131
132
132 ui = repo.ui
133 ui = repo.ui
133
134
134 if not repo.url().startswith('file:'):
135 if not repo.url().startswith('file:'):
135 raise error.Abort(_("cannot verify bundle or remote repos"))
136 raise error.Abort(_("cannot verify bundle or remote repos"))
136
137
137 if os.path.exists(repo.sjoin("journal")):
138 if os.path.exists(repo.sjoin("journal")):
138 ui.warn(_("abandoned transaction found - run hg recover\n"))
139 ui.warn(_("abandoned transaction found - run hg recover\n"))
139
140
140 if ui.verbose or not self.revlogv1:
141 if ui.verbose or not self.revlogv1:
141 ui.status(_("repository uses revlog format %d\n") %
142 ui.status(_("repository uses revlog format %d\n") %
142 (self.revlogv1 and 1 or 0))
143 (self.revlogv1 and 1 or 0))
143
144
144 mflinkrevs, filelinkrevs = self._verifychangelog()
145 mflinkrevs, filelinkrevs = self._verifychangelog()
145
146
146 filenodes = self._verifymanifest(mflinkrevs)
147 filenodes = self._verifymanifest(mflinkrevs)
147 del mflinkrevs
148 del mflinkrevs
148
149
149 self._crosscheckfiles(filelinkrevs, filenodes)
150 self._crosscheckfiles(filelinkrevs, filenodes)
150
151
151 totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
152 totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)
152
153
153 ui.status(_("%d files, %d changesets, %d total revisions\n") %
154 ui.status(_("%d files, %d changesets, %d total revisions\n") %
154 (totalfiles, len(repo.changelog), filerevisions))
155 (totalfiles, len(repo.changelog), filerevisions))
155 if self.warnings:
156 if self.warnings:
156 ui.warn(_("%d warnings encountered!\n") % self.warnings)
157 ui.warn(_("%d warnings encountered!\n") % self.warnings)
157 if self.fncachewarned:
158 if self.fncachewarned:
158 ui.warn(_('hint: run "hg debugrebuildfncache" to recover from '
159 ui.warn(_('hint: run "hg debugrebuildfncache" to recover from '
159 'corrupt fncache\n'))
160 'corrupt fncache\n'))
160 if self.errors:
161 if self.errors:
161 ui.warn(_("%d integrity errors encountered!\n") % self.errors)
162 ui.warn(_("%d integrity errors encountered!\n") % self.errors)
162 if self.badrevs:
163 if self.badrevs:
163 ui.warn(_("(first damaged changeset appears to be %d)\n")
164 ui.warn(_("(first damaged changeset appears to be %d)\n")
164 % min(self.badrevs))
165 % min(self.badrevs))
165 return 1
166 return 1
166
167
167 def _verifychangelog(self):
168 def _verifychangelog(self):
168 ui = self.ui
169 ui = self.ui
169 repo = self.repo
170 repo = self.repo
170 match = self.match
171 match = self.match
171 cl = repo.changelog
172 cl = repo.changelog
172
173
173 ui.status(_("checking changesets\n"))
174 ui.status(_("checking changesets\n"))
174 mflinkrevs = {}
175 mflinkrevs = {}
175 filelinkrevs = {}
176 filelinkrevs = {}
176 seen = {}
177 seen = {}
177 self.checklog(cl, "changelog", 0)
178 self.checklog(cl, "changelog", 0)
178 total = len(repo)
179 total = len(repo)
179 for i in repo:
180 for i in repo:
180 ui.progress(_('checking'), i, total=total, unit=_('changesets'))
181 ui.progress(_('checking'), i, total=total, unit=_('changesets'))
181 n = cl.node(i)
182 n = cl.node(i)
182 self.checkentry(cl, i, n, seen, [i], "changelog")
183 self.checkentry(cl, i, n, seen, [i], "changelog")
183
184
184 try:
185 try:
185 changes = cl.read(n)
186 changes = cl.read(n)
186 if changes[0] != nullid:
187 if changes[0] != nullid:
187 mflinkrevs.setdefault(changes[0], []).append(i)
188 mflinkrevs.setdefault(changes[0], []).append(i)
188 self.refersmf = True
189 self.refersmf = True
189 for f in changes[3]:
190 for f in changes[3]:
190 if match(f):
191 if match(f):
191 filelinkrevs.setdefault(_normpath(f), []).append(i)
192 filelinkrevs.setdefault(_normpath(f), []).append(i)
192 except Exception as inst:
193 except Exception as inst:
193 self.refersmf = True
194 self.refersmf = True
194 self.exc(i, _("unpacking changeset %s") % short(n), inst)
195 self.exc(i, _("unpacking changeset %s") % short(n), inst)
195 ui.progress(_('checking'), None)
196 ui.progress(_('checking'), None)
196 return mflinkrevs, filelinkrevs
197 return mflinkrevs, filelinkrevs
197
198
198 def _verifymanifest(self, mflinkrevs, dir="", storefiles=None,
199 def _verifymanifest(self, mflinkrevs, dir="", storefiles=None,
199 progress=None):
200 progress=None):
200 repo = self.repo
201 repo = self.repo
201 ui = self.ui
202 ui = self.ui
202 match = self.match
203 match = self.match
203 mfl = self.repo.manifestlog
204 mfl = self.repo.manifestlog
204 mf = mfl._revlog.dirlog(dir)
205 mf = mfl._revlog.dirlog(dir)
205
206
206 if not dir:
207 if not dir:
207 self.ui.status(_("checking manifests\n"))
208 self.ui.status(_("checking manifests\n"))
208
209
209 filenodes = {}
210 filenodes = {}
210 subdirnodes = {}
211 subdirnodes = {}
211 seen = {}
212 seen = {}
212 label = "manifest"
213 label = "manifest"
213 if dir:
214 if dir:
214 label = dir
215 label = dir
215 revlogfiles = mf.files()
216 revlogfiles = mf.files()
216 storefiles.difference_update(revlogfiles)
217 storefiles.difference_update(revlogfiles)
217 if progress: # should be true since we're in a subdirectory
218 if progress: # should be true since we're in a subdirectory
218 progress()
219 progress()
219 if self.refersmf:
220 if self.refersmf:
220 # Do not check manifest if there are only changelog entries with
221 # Do not check manifest if there are only changelog entries with
221 # null manifests.
222 # null manifests.
222 self.checklog(mf, label, 0)
223 self.checklog(mf, label, 0)
223 total = len(mf)
224 total = len(mf)
224 for i in mf:
225 for i in mf:
225 if not dir:
226 if not dir:
226 ui.progress(_('checking'), i, total=total, unit=_('manifests'))
227 ui.progress(_('checking'), i, total=total, unit=_('manifests'))
227 n = mf.node(i)
228 n = mf.node(i)
228 lr = self.checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label)
229 lr = self.checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label)
229 if n in mflinkrevs:
230 if n in mflinkrevs:
230 del mflinkrevs[n]
231 del mflinkrevs[n]
231 elif dir:
232 elif dir:
232 self.err(lr, _("%s not in parent-directory manifest") %
233 self.err(lr, _("%s not in parent-directory manifest") %
233 short(n), label)
234 short(n), label)
234 else:
235 else:
235 self.err(lr, _("%s not in changesets") % short(n), label)
236 self.err(lr, _("%s not in changesets") % short(n), label)
236
237
237 try:
238 try:
238 mfdelta = mfl.get(dir, n).readdelta(shallow=True)
239 mfdelta = mfl.get(dir, n).readdelta(shallow=True)
239 for f, fn, fl in mfdelta.iterentries():
240 for f, fn, fl in mfdelta.iterentries():
240 if not f:
241 if not f:
241 self.err(lr, _("entry without name in manifest"))
242 self.err(lr, _("entry without name in manifest"))
242 elif f == "/dev/null": # ignore this in very old repos
243 elif f == "/dev/null": # ignore this in very old repos
243 continue
244 continue
244 fullpath = dir + _normpath(f)
245 fullpath = dir + _normpath(f)
245 if fl == 't':
246 if fl == 't':
246 if not match.visitdir(fullpath):
247 if not match.visitdir(fullpath):
247 continue
248 continue
248 subdirnodes.setdefault(fullpath + '/', {}).setdefault(
249 subdirnodes.setdefault(fullpath + '/', {}).setdefault(
249 fn, []).append(lr)
250 fn, []).append(lr)
250 else:
251 else:
251 if not match(fullpath):
252 if not match(fullpath):
252 continue
253 continue
253 filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
254 filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
254 except Exception as inst:
255 except Exception as inst:
255 self.exc(lr, _("reading delta %s") % short(n), inst, label)
256 self.exc(lr, _("reading delta %s") % short(n), inst, label)
256 if not dir:
257 if not dir:
257 ui.progress(_('checking'), None)
258 ui.progress(_('checking'), None)
258
259
259 if self.havemf:
260 if self.havemf:
260 for c, m in sorted([(c, m) for m in mflinkrevs
261 for c, m in sorted([(c, m) for m in mflinkrevs
261 for c in mflinkrevs[m]]):
262 for c in mflinkrevs[m]]):
262 if dir:
263 if dir:
263 self.err(c, _("parent-directory manifest refers to unknown "
264 self.err(c, _("parent-directory manifest refers to unknown "
264 "revision %s") % short(m), label)
265 "revision %s") % short(m), label)
265 else:
266 else:
266 self.err(c, _("changeset refers to unknown revision %s") %
267 self.err(c, _("changeset refers to unknown revision %s") %
267 short(m), label)
268 short(m), label)
268
269
269 if not dir and subdirnodes:
270 if not dir and subdirnodes:
270 self.ui.status(_("checking directory manifests\n"))
271 self.ui.status(_("checking directory manifests\n"))
271 storefiles = set()
272 storefiles = set()
272 subdirs = set()
273 subdirs = set()
273 revlogv1 = self.revlogv1
274 revlogv1 = self.revlogv1
274 for f, f2, size in repo.store.datafiles():
275 for f, f2, size in repo.store.datafiles():
275 if not f:
276 if not f:
276 self.err(None, _("cannot decode filename '%s'") % f2)
277 self.err(None, _("cannot decode filename '%s'") % f2)
277 elif (size > 0 or not revlogv1) and f.startswith('meta/'):
278 elif (size > 0 or not revlogv1) and f.startswith('meta/'):
278 storefiles.add(_normpath(f))
279 storefiles.add(_normpath(f))
279 subdirs.add(os.path.dirname(f))
280 subdirs.add(os.path.dirname(f))
280 subdircount = len(subdirs)
281 subdircount = len(subdirs)
281 currentsubdir = [0]
282 currentsubdir = [0]
282 def progress():
283 def progress():
283 currentsubdir[0] += 1
284 currentsubdir[0] += 1
284 ui.progress(_('checking'), currentsubdir[0], total=subdircount,
285 ui.progress(_('checking'), currentsubdir[0], total=subdircount,
285 unit=_('manifests'))
286 unit=_('manifests'))
286
287
287 for subdir, linkrevs in subdirnodes.iteritems():
288 for subdir, linkrevs in subdirnodes.iteritems():
288 subdirfilenodes = self._verifymanifest(linkrevs, subdir, storefiles,
289 subdirfilenodes = self._verifymanifest(linkrevs, subdir, storefiles,
289 progress)
290 progress)
290 for f, onefilenodes in subdirfilenodes.iteritems():
291 for f, onefilenodes in subdirfilenodes.iteritems():
291 filenodes.setdefault(f, {}).update(onefilenodes)
292 filenodes.setdefault(f, {}).update(onefilenodes)
292
293
293 if not dir and subdirnodes:
294 if not dir and subdirnodes:
294 ui.progress(_('checking'), None)
295 ui.progress(_('checking'), None)
295 for f in sorted(storefiles):
296 for f in sorted(storefiles):
296 self.warn(_("warning: orphan revlog '%s'") % f)
297 self.warn(_("warning: orphan revlog '%s'") % f)
297
298
298 return filenodes
299 return filenodes
299
300
300 def _crosscheckfiles(self, filelinkrevs, filenodes):
    def _crosscheckfiles(self, filelinkrevs, filenodes):
        repo = self.repo
        ui = self.ui
        ui.status(_("crosschecking files in changesets and manifests\n"))

        total = len(filelinkrevs) + len(filenodes)
        count = 0
        if self.havemf:
            for f in sorted(filelinkrevs):
                count += 1
                ui.progress(_('crosschecking'), count, total=total)
                if f not in filenodes:
                    lr = filelinkrevs[f][0]
                    self.err(lr, _("in changeset but not in manifest"), f)

        if self.havecl:
            for f in sorted(filenodes):
                count += 1
                ui.progress(_('crosschecking'), count, total=total)
                if f not in filelinkrevs:
                    try:
                        fl = repo.file(f)
                        lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
                    except Exception:
                        lr = None
                    self.err(lr, _("in manifest but not in changeset"), f)

        ui.progress(_('crosschecking'), None)

    def _verifyfiles(self, filenodes, filelinkrevs):
        repo = self.repo
        ui = self.ui
        lrugetctx = self.lrugetctx
        revlogv1 = self.revlogv1
        havemf = self.havemf
        ui.status(_("checking files\n"))

        storefiles = set()
        for f, f2, size in repo.store.datafiles():
            if not f:
                self.err(None, _("cannot decode filename '%s'") % f2)
            elif (size > 0 or not revlogv1) and f.startswith('data/'):
                storefiles.add(_normpath(f))

        files = sorted(set(filenodes) | set(filelinkrevs))
        total = len(files)
        revisions = 0
        for i, f in enumerate(files):
            ui.progress(_('checking'), i, item=f, total=total, unit=_('files'))
            try:
                linkrevs = filelinkrevs[f]
            except KeyError:
                # in manifest but not in changelog
                linkrevs = []

            if linkrevs:
                lr = linkrevs[0]
            else:
                lr = None

            try:
                fl = repo.file(f)
            except error.RevlogError as e:
                self.err(lr, _("broken revlog! (%s)") % e, f)
                continue

            for ff in fl.files():
                try:
                    storefiles.remove(ff)
                except KeyError:
                    self.warn(_(" warning: revlog '%s' not in fncache!") % ff)
                    self.fncachewarned = True

            self.checklog(fl, f, lr)
            seen = {}
            rp = None
            for i in fl:
                revisions += 1
                n = fl.node(i)
                lr = self.checkentry(fl, i, n, seen, linkrevs, f)
                if f in filenodes:
                    if havemf and n not in filenodes[f]:
                        self.err(lr, _("%s not in manifests") % (short(n)), f)
                    else:
                        del filenodes[f][n]

                # Verify contents. 4 cases to care about:
                #
                #   common: the most common case
                #   rename: with a rename
                #   meta: file content starts with b'\1\n', the metadata
                #         header defined in filelog.py, but without a rename
                #   ext: content stored externally
                #
                # More formally, their differences are shown below:
                #
                #                       | common | rename | meta  | ext
                #  -------------------------------------------------------
                #  flags()              | 0      | 0      | 0     | not 0
                #  renamed()            | False  | True   | False | ?
                #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
                #
                # "rawtext" means the raw text stored in revlog data, which
                # could be retrieved by "revision(rev, raw=True)". "text"
                # mentioned below is "revision(rev, raw=False)".
                #
                # There are 3 different lengths stored physically:
                #  1. L1: rawsize, stored in revlog index
                #  2. L2: len(rawtext), stored in revlog data
                #  3. L3: len(text), stored in revlog data if flags==0, or
                #     possibly somewhere else if flags!=0
                #
                # L1 should be equal to L2. L3 could be different from them.
                # "text" may or may not affect commit hash depending on flag
                # processors (see revlog.addflagprocessor).
                #
                #              | common  | rename | meta  | ext
                # -------------------------------------------------
                # rawsize()    | L1      | L1     | L1    | L1
                # size()       | L1      | L2-LM  | L1(*) | L1 (?)
                # len(rawtext) | L2      | L2     | L2    | L2
                # len(text)    | L2      | L2     | L2    | L3
                # len(read())  | L2      | L2-LM  | L2-LM | L3 (?)
                #
                # LM:  length of metadata, depending on rawtext
                # (*): not ideal, see comment in filelog.size
                # (?): could be "- len(meta)" if the resolved content has
                #      rename metadata
                #
                # Checks needed to be done:
                #  1. length check: L1 == L2, in all cases.
                #  2. hash check: depending on flag processor, we may need to
                #     use either "text" (external), or "rawtext" (in revlog).
                try:
                    skipflags = self.skipflags
                    if skipflags:
                        skipflags &= fl.flags(i)
                    if not skipflags:
                        fl.read(n) # side effect: read content and do checkhash
                        rp = fl.renamed(n)
                    # the "L1 == L2" check
                    l1 = fl.rawsize(i)
                    l2 = len(fl.revision(n, raw=True))
                    if l1 != l2:
                        self.err(lr, _("unpacked size is %s, %s expected") %
                                 (l2, l1), f)
                except error.CensoredNodeError:
                    # experimental config: censor.policy
                    if ui.config("censor", "policy") == "abort":
                        self.err(lr, _("censored file data"), f)
                except Exception as inst:
                    self.exc(lr, _("unpacking %s") % short(n), inst, f)

                # check renames
                try:
                    if rp:
                        if lr is not None and ui.verbose:
                            ctx = lrugetctx(lr)
                            found = False
                            for pctx in ctx.parents():
                                if rp[0] in pctx:
                                    found = True
                                    break
                            if not found:
                                self.warn(_("warning: copy source of '%s' not"
                                            " in parents of %s") % (f, ctx))
                        fl2 = repo.file(rp[0])
                        if not len(fl2):
                            self.err(lr, _("empty or missing copy source "
                                     "revlog %s:%s") % (rp[0], short(rp[1])), f)
                        elif rp[1] == nullid:
                            ui.note(_("warning: %s@%s: copy source"
                                      " revision is nullid %s:%s\n")
                                    % (f, lr, rp[0], short(rp[1])))
                        else:
                            fl2.rev(rp[1])
                except Exception as inst:
                    self.exc(lr, _("checking rename of %s") % short(n), inst, f)

            # cross-check
            if f in filenodes:
                fns = [(v, k) for k, v in filenodes[f].iteritems()]
                for lr, node in sorted(fns):
                    self.err(lr, _("manifest refers to unknown revision %s") %
                             short(node), f)
        ui.progress(_('checking'), None)

        for f in sorted(storefiles):
            self.warn(_("warning: orphan revlog '%s'") % f)

        return len(files), revisions
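
The cross-check in _crosscheckfiles above boils down to comparing two key sets: the file names that changesets claim to touch (filelinkrevs) and the file names that manifests actually record (filenodes). A minimal standalone sketch of that comparison, using made-up plain dictionaries rather than the verifier's real bookkeeping structures:

# Hypothetical stand-ins for the verifier's bookkeeping:
#   filelinkrevs: file name -> linkrevs of changesets that mention it
#   filenodes:    file name -> {filenode: linkrev} collected from manifests
filelinkrevs = {'a.txt': [0], 'b.txt': [1]}
filenodes = {'a.txt': {'f1d2d2f9...': 0}, 'c.txt': {'9a0364b9...': 2}}

for f in sorted(set(filelinkrevs) - set(filenodes)):
    print('%s: in changeset but not in manifest' % f)   # reports b.txt
for f in sorted(set(filenodes) - set(filelinkrevs)):
    print('%s: in manifest but not in changeset' % f)   # reports c.txt

Note that the real method only reports each direction when the corresponding source of truth was readable (self.havemf and self.havecl), so a corrupt manifest or changelog does not flood the output with false positives.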
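The LM column in the table above is the size of the metadata header that filelog's read() strips from rawtext in the "rename" and "meta" cases: a rawtext that starts with b'\x01\n' carries key/value lines up to a closing b'\x01\n', and everything after that is the real file content. The helper below is a simplified, illustration-only version of that framing, not the code shipped in filelog.py:

# Illustration only; simplified version of the b'\x01\n' metadata framing.
def split_meta(rawtext):
    if not rawtext.startswith(b'\x01\n'):
        return {}, rawtext                      # "common" case: text == rawtext
    end = rawtext.index(b'\x01\n', 2)           # closing delimiter
    meta = dict(line.split(b': ', 1)
                for line in rawtext[2:end].splitlines())
    return meta, rawtext[end + 2:]

rawtext = b'\x01\ncopy: old.txt\ncopyrev: ' + b'0' * 40 + b'\n\x01\nhello\n'
meta, text = split_meta(rawtext)
lm = len(rawtext) - len(text)                   # LM from the table
print(meta[b'copy'], lm, len(text))             # b'old.txt' 68 6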
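The skipflags intersection at the top of the length/hash try block is what lets the verifier skip the expensive read()/checkhash step for revisions whose flags it was told to ignore (self.skipflags, populated from configuration). A compact sketch of that decision; the REVIDX_* values below are made-up bits standing in for Mercurial's real flag constants:

# Illustration only; made-up flag bits, not Mercurial's real REVIDX_* values.
REVIDX_EXTSTORED = 1 << 13     # pretend: content stored outside the revlog
REVIDX_ISCENSORED = 1 << 15    # pretend: censored revision

def needs_hash_check(skipflags, revision_flags):
    # mirrors: skipflags &= fl.flags(i); if not skipflags: fl.read(n)
    return not (skipflags & revision_flags)

print(needs_hash_check(0, REVIDX_EXTSTORED))                   # True: nothing configured to skip
print(needs_hash_check(REVIDX_EXTSTORED, REVIDX_EXTSTORED))    # False: skip this revision's check
print(needs_hash_check(REVIDX_EXTSTORED, REVIDX_ISCENSORED))   # True: different flag, still checked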