cleanup: remove redundant clearing of mergestate in rebase and shelve...
Martin von Zweigbergk
r44920:f0021fbe default
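
The substantive hunk in the listing below is in rebaseruntime._concludenode: the explicit mergemod.mergestate.clean(repo) call that ran when the rebased revision produced a no-op commit (the issue5494 workaround) is dropped, the commit title treating it as redundant (i.e. the merge state is presumably already cleared by the normal commit/clean-up path). What follows is a minimal, self-contained sketch of the shape of that change; 'commit' and 'clean_mergestate' are hypothetical stand-ins, not Mercurial's real API.

# Sketch only -- mirrors the control flow of the hunk further down,
# using toy callables instead of Mercurial internals.
def conclude_before(commit, clean_mergestate):
    """Old shape: clean the merge state by hand after a no-op commit."""
    newnode = commit()
    if newnode is None:
        # no-op commit: the normal merge-state clean-up path didn't run,
        # so it was done here (the issue5494 workaround being removed)
        clean_mergestate()
    return newnode

def conclude_after(commit):
    """New shape: just return the commit result; no manual clean-up."""
    return commit()

if __name__ == '__main__':
    # toy stand-ins so the sketch actually runs
    print(conclude_before(lambda: None, lambda: print('cleaned merge state')))
    print(conclude_after(lambda: 'f0021fbe'))
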
@@ -1,2243 +1,2238 @@
1 # rebase.py - rebasing feature for mercurial
1 # rebase.py - rebasing feature for mercurial
2 #
2 #
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to move sets of revisions to a different ancestor
8 '''command to move sets of revisions to a different ancestor
9
9
10 This extension lets you rebase changesets in an existing Mercurial
10 This extension lets you rebase changesets in an existing Mercurial
11 repository.
11 repository.
12
12
13 For more information:
13 For more information:
14 https://mercurial-scm.org/wiki/RebaseExtension
14 https://mercurial-scm.org/wiki/RebaseExtension
15 '''
15 '''
16
16
17 from __future__ import absolute_import
17 from __future__ import absolute_import
18
18
19 import errno
19 import errno
20 import os
20 import os
21
21
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import (
23 from mercurial.node import (
24 nullrev,
24 nullrev,
25 short,
25 short,
26 )
26 )
27 from mercurial.pycompat import open
27 from mercurial.pycompat import open
28 from mercurial import (
28 from mercurial import (
29 bookmarks,
29 bookmarks,
30 cmdutil,
30 cmdutil,
31 commands,
31 commands,
32 copies,
32 copies,
33 destutil,
33 destutil,
34 dirstateguard,
34 dirstateguard,
35 error,
35 error,
36 extensions,
36 extensions,
37 hg,
37 hg,
38 merge as mergemod,
38 merge as mergemod,
39 mergeutil,
39 mergeutil,
40 node as nodemod,
40 node as nodemod,
41 obsolete,
41 obsolete,
42 obsutil,
42 obsutil,
43 patch,
43 patch,
44 phases,
44 phases,
45 pycompat,
45 pycompat,
46 registrar,
46 registrar,
47 repair,
47 repair,
48 revset,
48 revset,
49 revsetlang,
49 revsetlang,
50 rewriteutil,
50 rewriteutil,
51 scmutil,
51 scmutil,
52 smartset,
52 smartset,
53 state as statemod,
53 state as statemod,
54 util,
54 util,
55 )
55 )
56
56
57 # The following constants are used throughout the rebase module. The ordering of
57 # The following constants are used throughout the rebase module. The ordering of
58 # their values must be maintained.
58 # their values must be maintained.
59
59
60 # Indicates that a revision needs to be rebased
60 # Indicates that a revision needs to be rebased
61 revtodo = -1
61 revtodo = -1
62 revtodostr = b'-1'
62 revtodostr = b'-1'
63
63
64 # legacy revstates no longer needed in current code
64 # legacy revstates no longer needed in current code
65 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
65 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
66 legacystates = {b'-2', b'-3', b'-4', b'-5'}
66 legacystates = {b'-2', b'-3', b'-4', b'-5'}
67
67
68 cmdtable = {}
68 cmdtable = {}
69 command = registrar.command(cmdtable)
69 command = registrar.command(cmdtable)
70 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
70 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
71 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
71 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
72 # be specifying the version(s) of Mercurial they are tested with, or
72 # be specifying the version(s) of Mercurial they are tested with, or
73 # leave the attribute unspecified.
73 # leave the attribute unspecified.
74 testedwith = b'ships-with-hg-core'
74 testedwith = b'ships-with-hg-core'
75
75
76
76
77 def _nothingtorebase():
77 def _nothingtorebase():
78 return 1
78 return 1
79
79
80
80
81 def _savegraft(ctx, extra):
81 def _savegraft(ctx, extra):
82 s = ctx.extra().get(b'source', None)
82 s = ctx.extra().get(b'source', None)
83 if s is not None:
83 if s is not None:
84 extra[b'source'] = s
84 extra[b'source'] = s
85 s = ctx.extra().get(b'intermediate-source', None)
85 s = ctx.extra().get(b'intermediate-source', None)
86 if s is not None:
86 if s is not None:
87 extra[b'intermediate-source'] = s
87 extra[b'intermediate-source'] = s
88
88
89
89
90 def _savebranch(ctx, extra):
90 def _savebranch(ctx, extra):
91 extra[b'branch'] = ctx.branch()
91 extra[b'branch'] = ctx.branch()
92
92
93
93
94 def _destrebase(repo, sourceset, destspace=None):
94 def _destrebase(repo, sourceset, destspace=None):
95 """small wrapper around destmerge to pass the right extra args
95 """small wrapper around destmerge to pass the right extra args
96
96
97 Please wrap destutil.destmerge instead."""
97 Please wrap destutil.destmerge instead."""
98 return destutil.destmerge(
98 return destutil.destmerge(
99 repo,
99 repo,
100 action=b'rebase',
100 action=b'rebase',
101 sourceset=sourceset,
101 sourceset=sourceset,
102 onheadcheck=False,
102 onheadcheck=False,
103 destspace=destspace,
103 destspace=destspace,
104 )
104 )
105
105
106
106
107 revsetpredicate = registrar.revsetpredicate()
107 revsetpredicate = registrar.revsetpredicate()
108
108
109
109
110 @revsetpredicate(b'_destrebase')
110 @revsetpredicate(b'_destrebase')
111 def _revsetdestrebase(repo, subset, x):
111 def _revsetdestrebase(repo, subset, x):
112 # ``_rebasedefaultdest()``
112 # ``_rebasedefaultdest()``
113
113
114 # default destination for rebase.
114 # default destination for rebase.
115 # # XXX: Currently private because I expect the signature to change.
115 # # XXX: Currently private because I expect the signature to change.
116 # # XXX: - bailing out in case of ambiguity vs returning all data.
116 # # XXX: - bailing out in case of ambiguity vs returning all data.
117 # i18n: "_rebasedefaultdest" is a keyword
117 # i18n: "_rebasedefaultdest" is a keyword
118 sourceset = None
118 sourceset = None
119 if x is not None:
119 if x is not None:
120 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
120 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
121 return subset & smartset.baseset([_destrebase(repo, sourceset)])
121 return subset & smartset.baseset([_destrebase(repo, sourceset)])
122
122
123
123
124 @revsetpredicate(b'_destautoorphanrebase')
124 @revsetpredicate(b'_destautoorphanrebase')
125 def _revsetdestautoorphanrebase(repo, subset, x):
125 def _revsetdestautoorphanrebase(repo, subset, x):
126 # ``_destautoorphanrebase()``
126 # ``_destautoorphanrebase()``
127
127
128 # automatic rebase destination for a single orphan revision.
128 # automatic rebase destination for a single orphan revision.
129 unfi = repo.unfiltered()
129 unfi = repo.unfiltered()
130 obsoleted = unfi.revs(b'obsolete()')
130 obsoleted = unfi.revs(b'obsolete()')
131
131
132 src = revset.getset(repo, subset, x).first()
132 src = revset.getset(repo, subset, x).first()
133
133
134 # Empty src or already obsoleted - Do not return a destination
134 # Empty src or already obsoleted - Do not return a destination
135 if not src or src in obsoleted:
135 if not src or src in obsoleted:
136 return smartset.baseset()
136 return smartset.baseset()
137 dests = destutil.orphanpossibledestination(repo, src)
137 dests = destutil.orphanpossibledestination(repo, src)
138 if len(dests) > 1:
138 if len(dests) > 1:
139 raise error.Abort(
139 raise error.Abort(
140 _(b"ambiguous automatic rebase: %r could end up on any of %r")
140 _(b"ambiguous automatic rebase: %r could end up on any of %r")
141 % (src, dests)
141 % (src, dests)
142 )
142 )
143 # We have zero or one destination, so we can just return here.
143 # We have zero or one destination, so we can just return here.
144 return smartset.baseset(dests)
144 return smartset.baseset(dests)
145
145
146
146
147 def _ctxdesc(ctx):
147 def _ctxdesc(ctx):
148 """short description for a context"""
148 """short description for a context"""
149 desc = b'%d:%s "%s"' % (
149 desc = b'%d:%s "%s"' % (
150 ctx.rev(),
150 ctx.rev(),
151 ctx,
151 ctx,
152 ctx.description().split(b'\n', 1)[0],
152 ctx.description().split(b'\n', 1)[0],
153 )
153 )
154 repo = ctx.repo()
154 repo = ctx.repo()
155 names = []
155 names = []
156 for nsname, ns in pycompat.iteritems(repo.names):
156 for nsname, ns in pycompat.iteritems(repo.names):
157 if nsname == b'branches':
157 if nsname == b'branches':
158 continue
158 continue
159 names.extend(ns.names(repo, ctx.node()))
159 names.extend(ns.names(repo, ctx.node()))
160 if names:
160 if names:
161 desc += b' (%s)' % b' '.join(names)
161 desc += b' (%s)' % b' '.join(names)
162 return desc
162 return desc
163
163
164
164
165 class rebaseruntime(object):
165 class rebaseruntime(object):
166 """This class is a container for rebase runtime state"""
166 """This class is a container for rebase runtime state"""
167
167
168 def __init__(self, repo, ui, inmemory=False, opts=None):
168 def __init__(self, repo, ui, inmemory=False, opts=None):
169 if opts is None:
169 if opts is None:
170 opts = {}
170 opts = {}
171
171
172 # prepared: whether we have rebasestate prepared or not. Currently it
172 # prepared: whether we have rebasestate prepared or not. Currently it
173 # decides whether "self.repo" is unfiltered or not.
173 # decides whether "self.repo" is unfiltered or not.
174 # The rebasestate has explicit hash to hash instructions not depending
174 # The rebasestate has explicit hash to hash instructions not depending
175 # on visibility. If rebasestate exists (in-memory or on-disk), use
175 # on visibility. If rebasestate exists (in-memory or on-disk), use
176 # unfiltered repo to avoid visibility issues.
176 # unfiltered repo to avoid visibility issues.
177 # Before knowing rebasestate (i.e. when starting a new rebase (not
177 # Before knowing rebasestate (i.e. when starting a new rebase (not
178 # --continue or --abort)), the original repo should be used so
178 # --continue or --abort)), the original repo should be used so
179 # visibility-dependent revsets are correct.
179 # visibility-dependent revsets are correct.
180 self.prepared = False
180 self.prepared = False
181 self.resume = False
181 self.resume = False
182 self._repo = repo
182 self._repo = repo
183
183
184 self.ui = ui
184 self.ui = ui
185 self.opts = opts
185 self.opts = opts
186 self.originalwd = None
186 self.originalwd = None
187 self.external = nullrev
187 self.external = nullrev
188 # Mapping between the old revision id and either what is the new rebased
188 # Mapping between the old revision id and either what is the new rebased
189 # revision or what needs to be done with the old revision. The state
189 # revision or what needs to be done with the old revision. The state
190 # dict will be what contains most of the rebase progress state.
190 # dict will be what contains most of the rebase progress state.
191 self.state = {}
191 self.state = {}
192 self.activebookmark = None
192 self.activebookmark = None
193 self.destmap = {}
193 self.destmap = {}
194 self.skipped = set()
194 self.skipped = set()
195
195
196 self.collapsef = opts.get(b'collapse', False)
196 self.collapsef = opts.get(b'collapse', False)
197 self.collapsemsg = cmdutil.logmessage(ui, opts)
197 self.collapsemsg = cmdutil.logmessage(ui, opts)
198 self.date = opts.get(b'date', None)
198 self.date = opts.get(b'date', None)
199
199
200 e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
200 e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
201 self.extrafns = [_savegraft]
201 self.extrafns = [_savegraft]
202 if e:
202 if e:
203 self.extrafns = [e]
203 self.extrafns = [e]
204
204
205 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
205 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
206 self.keepf = opts.get(b'keep', False)
206 self.keepf = opts.get(b'keep', False)
207 self.keepbranchesf = opts.get(b'keepbranches', False)
207 self.keepbranchesf = opts.get(b'keepbranches', False)
208 self.obsoletenotrebased = {}
208 self.obsoletenotrebased = {}
209 self.obsoletewithoutsuccessorindestination = set()
209 self.obsoletewithoutsuccessorindestination = set()
210 self.inmemory = inmemory
210 self.inmemory = inmemory
211 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
211 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
212
212
213 @property
213 @property
214 def repo(self):
214 def repo(self):
215 if self.prepared:
215 if self.prepared:
216 return self._repo.unfiltered()
216 return self._repo.unfiltered()
217 else:
217 else:
218 return self._repo
218 return self._repo
219
219
220 def storestatus(self, tr=None):
220 def storestatus(self, tr=None):
221 """Store the current status to allow recovery"""
221 """Store the current status to allow recovery"""
222 if tr:
222 if tr:
223 tr.addfilegenerator(
223 tr.addfilegenerator(
224 b'rebasestate',
224 b'rebasestate',
225 (b'rebasestate',),
225 (b'rebasestate',),
226 self._writestatus,
226 self._writestatus,
227 location=b'plain',
227 location=b'plain',
228 )
228 )
229 else:
229 else:
230 with self.repo.vfs(b"rebasestate", b"w") as f:
230 with self.repo.vfs(b"rebasestate", b"w") as f:
231 self._writestatus(f)
231 self._writestatus(f)
232
232
233 def _writestatus(self, f):
233 def _writestatus(self, f):
234 repo = self.repo
234 repo = self.repo
235 assert repo.filtername is None
235 assert repo.filtername is None
236 f.write(repo[self.originalwd].hex() + b'\n')
236 f.write(repo[self.originalwd].hex() + b'\n')
237 # was "dest". we now write dest per src root below.
237 # was "dest". we now write dest per src root below.
238 f.write(b'\n')
238 f.write(b'\n')
239 f.write(repo[self.external].hex() + b'\n')
239 f.write(repo[self.external].hex() + b'\n')
240 f.write(b'%d\n' % int(self.collapsef))
240 f.write(b'%d\n' % int(self.collapsef))
241 f.write(b'%d\n' % int(self.keepf))
241 f.write(b'%d\n' % int(self.keepf))
242 f.write(b'%d\n' % int(self.keepbranchesf))
242 f.write(b'%d\n' % int(self.keepbranchesf))
243 f.write(b'%s\n' % (self.activebookmark or b''))
243 f.write(b'%s\n' % (self.activebookmark or b''))
244 destmap = self.destmap
244 destmap = self.destmap
245 for d, v in pycompat.iteritems(self.state):
245 for d, v in pycompat.iteritems(self.state):
246 oldrev = repo[d].hex()
246 oldrev = repo[d].hex()
247 if v >= 0:
247 if v >= 0:
248 newrev = repo[v].hex()
248 newrev = repo[v].hex()
249 else:
249 else:
250 newrev = b"%d" % v
250 newrev = b"%d" % v
251 destnode = repo[destmap[d]].hex()
251 destnode = repo[destmap[d]].hex()
252 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
252 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
253 repo.ui.debug(b'rebase status stored\n')
253 repo.ui.debug(b'rebase status stored\n')
254
254
255 def restorestatus(self):
255 def restorestatus(self):
256 """Restore a previously stored status"""
256 """Restore a previously stored status"""
257 if not self.stateobj.exists():
257 if not self.stateobj.exists():
258 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
258 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
259
259
260 data = self._read()
260 data = self._read()
261 self.repo.ui.debug(b'rebase status resumed\n')
261 self.repo.ui.debug(b'rebase status resumed\n')
262
262
263 self.originalwd = data[b'originalwd']
263 self.originalwd = data[b'originalwd']
264 self.destmap = data[b'destmap']
264 self.destmap = data[b'destmap']
265 self.state = data[b'state']
265 self.state = data[b'state']
266 self.skipped = data[b'skipped']
266 self.skipped = data[b'skipped']
267 self.collapsef = data[b'collapse']
267 self.collapsef = data[b'collapse']
268 self.keepf = data[b'keep']
268 self.keepf = data[b'keep']
269 self.keepbranchesf = data[b'keepbranches']
269 self.keepbranchesf = data[b'keepbranches']
270 self.external = data[b'external']
270 self.external = data[b'external']
271 self.activebookmark = data[b'activebookmark']
271 self.activebookmark = data[b'activebookmark']
272
272
273 def _read(self):
273 def _read(self):
274 self.prepared = True
274 self.prepared = True
275 repo = self.repo
275 repo = self.repo
276 assert repo.filtername is None
276 assert repo.filtername is None
277 data = {
277 data = {
278 b'keepbranches': None,
278 b'keepbranches': None,
279 b'collapse': None,
279 b'collapse': None,
280 b'activebookmark': None,
280 b'activebookmark': None,
281 b'external': nullrev,
281 b'external': nullrev,
282 b'keep': None,
282 b'keep': None,
283 b'originalwd': None,
283 b'originalwd': None,
284 }
284 }
285 legacydest = None
285 legacydest = None
286 state = {}
286 state = {}
287 destmap = {}
287 destmap = {}
288
288
289 if True:
289 if True:
290 f = repo.vfs(b"rebasestate")
290 f = repo.vfs(b"rebasestate")
291 for i, l in enumerate(f.read().splitlines()):
291 for i, l in enumerate(f.read().splitlines()):
292 if i == 0:
292 if i == 0:
293 data[b'originalwd'] = repo[l].rev()
293 data[b'originalwd'] = repo[l].rev()
294 elif i == 1:
294 elif i == 1:
295 # this line should be empty in newer version. but legacy
295 # this line should be empty in newer version. but legacy
296 # clients may still use it
296 # clients may still use it
297 if l:
297 if l:
298 legacydest = repo[l].rev()
298 legacydest = repo[l].rev()
299 elif i == 2:
299 elif i == 2:
300 data[b'external'] = repo[l].rev()
300 data[b'external'] = repo[l].rev()
301 elif i == 3:
301 elif i == 3:
302 data[b'collapse'] = bool(int(l))
302 data[b'collapse'] = bool(int(l))
303 elif i == 4:
303 elif i == 4:
304 data[b'keep'] = bool(int(l))
304 data[b'keep'] = bool(int(l))
305 elif i == 5:
305 elif i == 5:
306 data[b'keepbranches'] = bool(int(l))
306 data[b'keepbranches'] = bool(int(l))
307 elif i == 6 and not (len(l) == 81 and b':' in l):
307 elif i == 6 and not (len(l) == 81 and b':' in l):
308 # line 6 is a recent addition, so for backwards
308 # line 6 is a recent addition, so for backwards
309 # compatibility check that the line doesn't look like the
309 # compatibility check that the line doesn't look like the
310 # oldrev:newrev lines
310 # oldrev:newrev lines
311 data[b'activebookmark'] = l
311 data[b'activebookmark'] = l
312 else:
312 else:
313 args = l.split(b':')
313 args = l.split(b':')
314 oldrev = repo[args[0]].rev()
314 oldrev = repo[args[0]].rev()
315 newrev = args[1]
315 newrev = args[1]
316 if newrev in legacystates:
316 if newrev in legacystates:
317 continue
317 continue
318 if len(args) > 2:
318 if len(args) > 2:
319 destrev = repo[args[2]].rev()
319 destrev = repo[args[2]].rev()
320 else:
320 else:
321 destrev = legacydest
321 destrev = legacydest
322 destmap[oldrev] = destrev
322 destmap[oldrev] = destrev
323 if newrev == revtodostr:
323 if newrev == revtodostr:
324 state[oldrev] = revtodo
324 state[oldrev] = revtodo
325 # Legacy compat special case
325 # Legacy compat special case
326 else:
326 else:
327 state[oldrev] = repo[newrev].rev()
327 state[oldrev] = repo[newrev].rev()
328
328
329 if data[b'keepbranches'] is None:
329 if data[b'keepbranches'] is None:
330 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
330 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
331
331
332 data[b'destmap'] = destmap
332 data[b'destmap'] = destmap
333 data[b'state'] = state
333 data[b'state'] = state
334 skipped = set()
334 skipped = set()
335 # recompute the set of skipped revs
335 # recompute the set of skipped revs
336 if not data[b'collapse']:
336 if not data[b'collapse']:
337 seen = set(destmap.values())
337 seen = set(destmap.values())
338 for old, new in sorted(state.items()):
338 for old, new in sorted(state.items()):
339 if new != revtodo and new in seen:
339 if new != revtodo and new in seen:
340 skipped.add(old)
340 skipped.add(old)
341 seen.add(new)
341 seen.add(new)
342 data[b'skipped'] = skipped
342 data[b'skipped'] = skipped
343 repo.ui.debug(
343 repo.ui.debug(
344 b'computed skipped revs: %s\n'
344 b'computed skipped revs: %s\n'
345 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
345 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
346 )
346 )
347
347
348 return data
348 return data
349
349
350 def _handleskippingobsolete(self, obsoleterevs, destmap):
350 def _handleskippingobsolete(self, obsoleterevs, destmap):
351 """Compute structures necessary for skipping obsolete revisions
351 """Compute structures necessary for skipping obsolete revisions
352
352
353 obsoleterevs: iterable of all obsolete revisions in rebaseset
353 obsoleterevs: iterable of all obsolete revisions in rebaseset
354 destmap: {srcrev: destrev} destination revisions
354 destmap: {srcrev: destrev} destination revisions
355 """
355 """
356 self.obsoletenotrebased = {}
356 self.obsoletenotrebased = {}
357 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
357 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
358 return
358 return
359 obsoleteset = set(obsoleterevs)
359 obsoleteset = set(obsoleterevs)
360 (
360 (
361 self.obsoletenotrebased,
361 self.obsoletenotrebased,
362 self.obsoletewithoutsuccessorindestination,
362 self.obsoletewithoutsuccessorindestination,
363 obsoleteextinctsuccessors,
363 obsoleteextinctsuccessors,
364 ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
364 ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
365 skippedset = set(self.obsoletenotrebased)
365 skippedset = set(self.obsoletenotrebased)
366 skippedset.update(self.obsoletewithoutsuccessorindestination)
366 skippedset.update(self.obsoletewithoutsuccessorindestination)
367 skippedset.update(obsoleteextinctsuccessors)
367 skippedset.update(obsoleteextinctsuccessors)
368 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
368 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
369
369
370 def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
370 def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
371 self.resume = True
371 self.resume = True
372 try:
372 try:
373 self.restorestatus()
373 self.restorestatus()
374 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
374 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
375 except error.RepoLookupError:
375 except error.RepoLookupError:
376 if isabort:
376 if isabort:
377 clearstatus(self.repo)
377 clearstatus(self.repo)
378 clearcollapsemsg(self.repo)
378 clearcollapsemsg(self.repo)
379 self.repo.ui.warn(
379 self.repo.ui.warn(
380 _(
380 _(
381 b'rebase aborted (no revision is removed,'
381 b'rebase aborted (no revision is removed,'
382 b' only broken state is cleared)\n'
382 b' only broken state is cleared)\n'
383 )
383 )
384 )
384 )
385 return 0
385 return 0
386 else:
386 else:
387 msg = _(b'cannot continue inconsistent rebase')
387 msg = _(b'cannot continue inconsistent rebase')
388 hint = _(b'use "hg rebase --abort" to clear broken state')
388 hint = _(b'use "hg rebase --abort" to clear broken state')
389 raise error.Abort(msg, hint=hint)
389 raise error.Abort(msg, hint=hint)
390
390
391 if isabort:
391 if isabort:
392 backup = backup and self.backupf
392 backup = backup and self.backupf
393 return self._abort(backup=backup, suppwarns=suppwarns)
393 return self._abort(backup=backup, suppwarns=suppwarns)
394
394
395 def _preparenewrebase(self, destmap):
395 def _preparenewrebase(self, destmap):
396 if not destmap:
396 if not destmap:
397 return _nothingtorebase()
397 return _nothingtorebase()
398
398
399 rebaseset = destmap.keys()
399 rebaseset = destmap.keys()
400 if not self.keepf:
400 if not self.keepf:
401 try:
401 try:
402 rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
402 rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
403 except error.Abort as e:
403 except error.Abort as e:
404 if e.hint is None:
404 if e.hint is None:
405 e.hint = _(b'use --keep to keep original changesets')
405 e.hint = _(b'use --keep to keep original changesets')
406 raise e
406 raise e
407
407
408 result = buildstate(self.repo, destmap, self.collapsef)
408 result = buildstate(self.repo, destmap, self.collapsef)
409
409
410 if not result:
410 if not result:
411 # Empty state built, nothing to rebase
411 # Empty state built, nothing to rebase
412 self.ui.status(_(b'nothing to rebase\n'))
412 self.ui.status(_(b'nothing to rebase\n'))
413 return _nothingtorebase()
413 return _nothingtorebase()
414
414
415 (self.originalwd, self.destmap, self.state) = result
415 (self.originalwd, self.destmap, self.state) = result
416 if self.collapsef:
416 if self.collapsef:
417 dests = set(self.destmap.values())
417 dests = set(self.destmap.values())
418 if len(dests) != 1:
418 if len(dests) != 1:
419 raise error.Abort(
419 raise error.Abort(
420 _(b'--collapse does not work with multiple destinations')
420 _(b'--collapse does not work with multiple destinations')
421 )
421 )
422 destrev = next(iter(dests))
422 destrev = next(iter(dests))
423 destancestors = self.repo.changelog.ancestors(
423 destancestors = self.repo.changelog.ancestors(
424 [destrev], inclusive=True
424 [destrev], inclusive=True
425 )
425 )
426 self.external = externalparent(self.repo, self.state, destancestors)
426 self.external = externalparent(self.repo, self.state, destancestors)
427
427
428 for destrev in sorted(set(destmap.values())):
428 for destrev in sorted(set(destmap.values())):
429 dest = self.repo[destrev]
429 dest = self.repo[destrev]
430 if dest.closesbranch() and not self.keepbranchesf:
430 if dest.closesbranch() and not self.keepbranchesf:
431 self.ui.status(_(b'reopening closed branch head %s\n') % dest)
431 self.ui.status(_(b'reopening closed branch head %s\n') % dest)
432
432
433 self.prepared = True
433 self.prepared = True
434
434
435 def _assignworkingcopy(self):
435 def _assignworkingcopy(self):
436 if self.inmemory:
436 if self.inmemory:
437 from mercurial.context import overlayworkingctx
437 from mercurial.context import overlayworkingctx
438
438
439 self.wctx = overlayworkingctx(self.repo)
439 self.wctx = overlayworkingctx(self.repo)
440 self.repo.ui.debug(b"rebasing in-memory\n")
440 self.repo.ui.debug(b"rebasing in-memory\n")
441 else:
441 else:
442 self.wctx = self.repo[None]
442 self.wctx = self.repo[None]
443 self.repo.ui.debug(b"rebasing on disk\n")
443 self.repo.ui.debug(b"rebasing on disk\n")
444 self.repo.ui.log(
444 self.repo.ui.log(
445 b"rebase",
445 b"rebase",
446 b"using in-memory rebase: %r\n",
446 b"using in-memory rebase: %r\n",
447 self.inmemory,
447 self.inmemory,
448 rebase_imm_used=self.inmemory,
448 rebase_imm_used=self.inmemory,
449 )
449 )
450
450
451 def _performrebase(self, tr):
451 def _performrebase(self, tr):
452 self._assignworkingcopy()
452 self._assignworkingcopy()
453 repo, ui = self.repo, self.ui
453 repo, ui = self.repo, self.ui
454 if self.keepbranchesf:
454 if self.keepbranchesf:
455 # insert _savebranch at the start of extrafns so if
455 # insert _savebranch at the start of extrafns so if
456 # there's a user-provided extrafn it can clobber branch if
456 # there's a user-provided extrafn it can clobber branch if
457 # desired
457 # desired
458 self.extrafns.insert(0, _savebranch)
458 self.extrafns.insert(0, _savebranch)
459 if self.collapsef:
459 if self.collapsef:
460 branches = set()
460 branches = set()
461 for rev in self.state:
461 for rev in self.state:
462 branches.add(repo[rev].branch())
462 branches.add(repo[rev].branch())
463 if len(branches) > 1:
463 if len(branches) > 1:
464 raise error.Abort(
464 raise error.Abort(
465 _(b'cannot collapse multiple named branches')
465 _(b'cannot collapse multiple named branches')
466 )
466 )
467
467
468 # Calculate self.obsoletenotrebased
468 # Calculate self.obsoletenotrebased
469 obsrevs = _filterobsoleterevs(self.repo, self.state)
469 obsrevs = _filterobsoleterevs(self.repo, self.state)
470 self._handleskippingobsolete(obsrevs, self.destmap)
470 self._handleskippingobsolete(obsrevs, self.destmap)
471
471
472 # Keep track of the active bookmarks in order to reset them later
472 # Keep track of the active bookmarks in order to reset them later
473 self.activebookmark = self.activebookmark or repo._activebookmark
473 self.activebookmark = self.activebookmark or repo._activebookmark
474 if self.activebookmark:
474 if self.activebookmark:
475 bookmarks.deactivate(repo)
475 bookmarks.deactivate(repo)
476
476
477 # Store the state before we begin so users can run 'hg rebase --abort'
477 # Store the state before we begin so users can run 'hg rebase --abort'
478 # if we fail before the transaction closes.
478 # if we fail before the transaction closes.
479 self.storestatus()
479 self.storestatus()
480 if tr:
480 if tr:
481 # When using single transaction, store state when transaction
481 # When using single transaction, store state when transaction
482 # commits.
482 # commits.
483 self.storestatus(tr)
483 self.storestatus(tr)
484
484
485 cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
485 cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
486 p = repo.ui.makeprogress(
486 p = repo.ui.makeprogress(
487 _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
487 _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
488 )
488 )
489
489
490 def progress(ctx):
490 def progress(ctx):
491 p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
491 p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
492
492
493 allowdivergence = self.ui.configbool(
493 allowdivergence = self.ui.configbool(
494 b'experimental', b'evolution.allowdivergence'
494 b'experimental', b'evolution.allowdivergence'
495 )
495 )
496 for subset in sortsource(self.destmap):
496 for subset in sortsource(self.destmap):
497 sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
497 sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
498 if not allowdivergence:
498 if not allowdivergence:
499 sortedrevs -= self.repo.revs(
499 sortedrevs -= self.repo.revs(
500 b'descendants(%ld) and not %ld',
500 b'descendants(%ld) and not %ld',
501 self.obsoletewithoutsuccessorindestination,
501 self.obsoletewithoutsuccessorindestination,
502 self.obsoletewithoutsuccessorindestination,
502 self.obsoletewithoutsuccessorindestination,
503 )
503 )
504 for rev in sortedrevs:
504 for rev in sortedrevs:
505 self._rebasenode(tr, rev, allowdivergence, progress)
505 self._rebasenode(tr, rev, allowdivergence, progress)
506 p.complete()
506 p.complete()
507 ui.note(_(b'rebase merging completed\n'))
507 ui.note(_(b'rebase merging completed\n'))
508
508
509 def _concludenode(self, rev, p1, editor, commitmsg=None):
509 def _concludenode(self, rev, p1, editor, commitmsg=None):
510 '''Commit the wd changes with parents p1 and p2.
510 '''Commit the wd changes with parents p1 and p2.
511
511
512 Reuse commit info from rev but also store useful information in extra.
512 Reuse commit info from rev but also store useful information in extra.
513 Return node of committed revision.'''
513 Return node of committed revision.'''
514 repo = self.repo
514 repo = self.repo
515 ctx = repo[rev]
515 ctx = repo[rev]
516 if commitmsg is None:
516 if commitmsg is None:
517 commitmsg = ctx.description()
517 commitmsg = ctx.description()
518 date = self.date
518 date = self.date
519 if date is None:
519 if date is None:
520 date = ctx.date()
520 date = ctx.date()
521 extra = {b'rebase_source': ctx.hex()}
521 extra = {b'rebase_source': ctx.hex()}
522 for c in self.extrafns:
522 for c in self.extrafns:
523 c(ctx, extra)
523 c(ctx, extra)
524 keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
524 keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
525 destphase = max(ctx.phase(), phases.draft)
525 destphase = max(ctx.phase(), phases.draft)
526 overrides = {(b'phases', b'new-commit'): destphase}
526 overrides = {(b'phases', b'new-commit'): destphase}
527 if keepbranch:
527 if keepbranch:
528 overrides[(b'ui', b'allowemptycommit')] = True
528 overrides[(b'ui', b'allowemptycommit')] = True
529 with repo.ui.configoverride(overrides, b'rebase'):
529 with repo.ui.configoverride(overrides, b'rebase'):
530 if self.inmemory:
530 if self.inmemory:
531 newnode = commitmemorynode(
531 newnode = commitmemorynode(
532 repo,
532 repo,
533 wctx=self.wctx,
533 wctx=self.wctx,
534 extra=extra,
534 extra=extra,
535 commitmsg=commitmsg,
535 commitmsg=commitmsg,
536 editor=editor,
536 editor=editor,
537 user=ctx.user(),
537 user=ctx.user(),
538 date=date,
538 date=date,
539 )
539 )
540 mergemod.mergestate.clean(repo)
540 mergemod.mergestate.clean(repo)
541 else:
541 else:
542 newnode = commitnode(
542 newnode = commitnode(
543 repo,
543 repo,
544 extra=extra,
544 extra=extra,
545 commitmsg=commitmsg,
545 commitmsg=commitmsg,
546 editor=editor,
546 editor=editor,
547 user=ctx.user(),
547 user=ctx.user(),
548 date=date,
548 date=date,
549 )
549 )
550
550
551 if newnode is None:
552 # If it ended up being a no-op commit, then the normal
553 # merge state clean-up path doesn't happen, so do it
554 # here. Fix issue5494
555 mergemod.mergestate.clean(repo)
556 return newnode
551 return newnode
557
552
558 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
553 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
559 repo, ui, opts = self.repo, self.ui, self.opts
554 repo, ui, opts = self.repo, self.ui, self.opts
560 dest = self.destmap[rev]
555 dest = self.destmap[rev]
561 ctx = repo[rev]
556 ctx = repo[rev]
562 desc = _ctxdesc(ctx)
557 desc = _ctxdesc(ctx)
563 if self.state[rev] == rev:
558 if self.state[rev] == rev:
564 ui.status(_(b'already rebased %s\n') % desc)
559 ui.status(_(b'already rebased %s\n') % desc)
565 elif (
560 elif (
566 not allowdivergence
561 not allowdivergence
567 and rev in self.obsoletewithoutsuccessorindestination
562 and rev in self.obsoletewithoutsuccessorindestination
568 ):
563 ):
569 msg = (
564 msg = (
570 _(
565 _(
571 b'note: not rebasing %s and its descendants as '
566 b'note: not rebasing %s and its descendants as '
572 b'this would cause divergence\n'
567 b'this would cause divergence\n'
573 )
568 )
574 % desc
569 % desc
575 )
570 )
576 repo.ui.status(msg)
571 repo.ui.status(msg)
577 self.skipped.add(rev)
572 self.skipped.add(rev)
578 elif rev in self.obsoletenotrebased:
573 elif rev in self.obsoletenotrebased:
579 succ = self.obsoletenotrebased[rev]
574 succ = self.obsoletenotrebased[rev]
580 if succ is None:
575 if succ is None:
581 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
576 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
582 else:
577 else:
583 succdesc = _ctxdesc(repo[succ])
578 succdesc = _ctxdesc(repo[succ])
584 msg = _(
579 msg = _(
585 b'note: not rebasing %s, already in destination as %s\n'
580 b'note: not rebasing %s, already in destination as %s\n'
586 ) % (desc, succdesc)
581 ) % (desc, succdesc)
587 repo.ui.status(msg)
582 repo.ui.status(msg)
588 # Make clearrebased aware state[rev] is not a true successor
583 # Make clearrebased aware state[rev] is not a true successor
589 self.skipped.add(rev)
584 self.skipped.add(rev)
590 # Record rev as moved to its desired destination in self.state.
585 # Record rev as moved to its desired destination in self.state.
591 # This helps bookmark and working parent movement.
586 # This helps bookmark and working parent movement.
592 dest = max(
587 dest = max(
593 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
588 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
594 )
589 )
595 self.state[rev] = dest
590 self.state[rev] = dest
596 elif self.state[rev] == revtodo:
591 elif self.state[rev] == revtodo:
597 ui.status(_(b'rebasing %s\n') % desc)
592 ui.status(_(b'rebasing %s\n') % desc)
598 progressfn(ctx)
593 progressfn(ctx)
599 p1, p2, base = defineparents(
594 p1, p2, base = defineparents(
600 repo,
595 repo,
601 rev,
596 rev,
602 self.destmap,
597 self.destmap,
603 self.state,
598 self.state,
604 self.skipped,
599 self.skipped,
605 self.obsoletenotrebased,
600 self.obsoletenotrebased,
606 )
601 )
607 if self.resume and self.wctx.p1().rev() == p1:
602 if self.resume and self.wctx.p1().rev() == p1:
608 repo.ui.debug(b'resuming interrupted rebase\n')
603 repo.ui.debug(b'resuming interrupted rebase\n')
609 self.resume = False
604 self.resume = False
610 else:
605 else:
611 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
606 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
612 with ui.configoverride(overrides, b'rebase'):
607 with ui.configoverride(overrides, b'rebase'):
613 stats = rebasenode(
608 stats = rebasenode(
614 repo,
609 repo,
615 rev,
610 rev,
616 p1,
611 p1,
617 p2,
612 p2,
618 base,
613 base,
619 self.collapsef,
614 self.collapsef,
620 dest,
615 dest,
621 wctx=self.wctx,
616 wctx=self.wctx,
622 )
617 )
623 if stats.unresolvedcount > 0:
618 if stats.unresolvedcount > 0:
624 if self.inmemory:
619 if self.inmemory:
625 raise error.InMemoryMergeConflictsError()
620 raise error.InMemoryMergeConflictsError()
626 else:
621 else:
627 raise error.InterventionRequired(
622 raise error.InterventionRequired(
628 _(
623 _(
629 b'unresolved conflicts (see hg '
624 b'unresolved conflicts (see hg '
630 b'resolve, then hg rebase --continue)'
625 b'resolve, then hg rebase --continue)'
631 )
626 )
632 )
627 )
633 if not self.collapsef:
628 if not self.collapsef:
634 merging = p2 != nullrev
629 merging = p2 != nullrev
635 editform = cmdutil.mergeeditform(merging, b'rebase')
630 editform = cmdutil.mergeeditform(merging, b'rebase')
636 editor = cmdutil.getcommiteditor(
631 editor = cmdutil.getcommiteditor(
637 editform=editform, **pycompat.strkwargs(opts)
632 editform=editform, **pycompat.strkwargs(opts)
638 )
633 )
639 newnode = self._concludenode(rev, p1, editor)
634 newnode = self._concludenode(rev, p1, editor)
640 else:
635 else:
641 # Skip commit if we are collapsing
636 # Skip commit if we are collapsing
642 newnode = None
637 newnode = None
643 # Update the state
638 # Update the state
644 if newnode is not None:
639 if newnode is not None:
645 self.state[rev] = repo[newnode].rev()
640 self.state[rev] = repo[newnode].rev()
646 ui.debug(b'rebased as %s\n' % short(newnode))
641 ui.debug(b'rebased as %s\n' % short(newnode))
647 else:
642 else:
648 if not self.collapsef:
643 if not self.collapsef:
649 ui.warn(
644 ui.warn(
650 _(
645 _(
651 b'note: not rebasing %s, its destination already '
646 b'note: not rebasing %s, its destination already '
652 b'has all its changes\n'
647 b'has all its changes\n'
653 )
648 )
654 % desc
649 % desc
655 )
650 )
656 self.skipped.add(rev)
651 self.skipped.add(rev)
657 self.state[rev] = p1
652 self.state[rev] = p1
658 ui.debug(b'next revision set to %d\n' % p1)
653 ui.debug(b'next revision set to %d\n' % p1)
659 else:
654 else:
660 ui.status(
655 ui.status(
661 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
656 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
662 )
657 )
663 if not tr:
658 if not tr:
664 # When not using single transaction, store state after each
659 # When not using single transaction, store state after each
665 # commit is completely done. On InterventionRequired, we thus
660 # commit is completely done. On InterventionRequired, we thus
666 # won't store the status. Instead, we'll hit the "len(parents) == 2"
661 # won't store the status. Instead, we'll hit the "len(parents) == 2"
667 # case and realize that the commit was in progress.
662 # case and realize that the commit was in progress.
668 self.storestatus()
663 self.storestatus()
669
664
670 def _finishrebase(self):
665 def _finishrebase(self):
671 repo, ui, opts = self.repo, self.ui, self.opts
666 repo, ui, opts = self.repo, self.ui, self.opts
672 fm = ui.formatter(b'rebase', opts)
667 fm = ui.formatter(b'rebase', opts)
673 fm.startitem()
668 fm.startitem()
674 if self.collapsef:
669 if self.collapsef:
675 p1, p2, _base = defineparents(
670 p1, p2, _base = defineparents(
676 repo,
671 repo,
677 min(self.state),
672 min(self.state),
678 self.destmap,
673 self.destmap,
679 self.state,
674 self.state,
680 self.skipped,
675 self.skipped,
681 self.obsoletenotrebased,
676 self.obsoletenotrebased,
682 )
677 )
683 editopt = opts.get(b'edit')
678 editopt = opts.get(b'edit')
684 editform = b'rebase.collapse'
679 editform = b'rebase.collapse'
685 if self.collapsemsg:
680 if self.collapsemsg:
686 commitmsg = self.collapsemsg
681 commitmsg = self.collapsemsg
687 else:
682 else:
688 commitmsg = b'Collapsed revision'
683 commitmsg = b'Collapsed revision'
689 for rebased in sorted(self.state):
684 for rebased in sorted(self.state):
690 if rebased not in self.skipped:
685 if rebased not in self.skipped:
691 commitmsg += b'\n* %s' % repo[rebased].description()
686 commitmsg += b'\n* %s' % repo[rebased].description()
692 editopt = True
687 editopt = True
693 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
688 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
694 revtoreuse = max(self.state)
689 revtoreuse = max(self.state)
695
690
696 self.wctx.setparents(repo[p1].node(), repo[self.external].node())
691 self.wctx.setparents(repo[p1].node(), repo[self.external].node())
697 newnode = self._concludenode(
692 newnode = self._concludenode(
698 revtoreuse, p1, editor, commitmsg=commitmsg
693 revtoreuse, p1, editor, commitmsg=commitmsg
699 )
694 )
700
695
701 if newnode is not None:
696 if newnode is not None:
702 newrev = repo[newnode].rev()
697 newrev = repo[newnode].rev()
703 for oldrev in self.state:
698 for oldrev in self.state:
704 self.state[oldrev] = newrev
699 self.state[oldrev] = newrev
705
700
706 if b'qtip' in repo.tags():
701 if b'qtip' in repo.tags():
707 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
702 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
708
703
709 # restore original working directory
704 # restore original working directory
710 # (we do this before stripping)
705 # (we do this before stripping)
711 newwd = self.state.get(self.originalwd, self.originalwd)
706 newwd = self.state.get(self.originalwd, self.originalwd)
712 if newwd < 0:
707 if newwd < 0:
713 # original directory is a parent of rebase set root or ignored
708 # original directory is a parent of rebase set root or ignored
714 newwd = self.originalwd
709 newwd = self.originalwd
715 if newwd not in [c.rev() for c in repo[None].parents()]:
710 if newwd not in [c.rev() for c in repo[None].parents()]:
716 ui.note(_(b"update back to initial working directory parent\n"))
711 ui.note(_(b"update back to initial working directory parent\n"))
717 hg.updaterepo(repo, newwd, overwrite=False)
712 hg.updaterepo(repo, newwd, overwrite=False)
718
713
719 collapsedas = None
714 collapsedas = None
720 if self.collapsef and not self.keepf:
715 if self.collapsef and not self.keepf:
721 collapsedas = newnode
716 collapsedas = newnode
722 clearrebased(
717 clearrebased(
723 ui,
718 ui,
724 repo,
719 repo,
725 self.destmap,
720 self.destmap,
726 self.state,
721 self.state,
727 self.skipped,
722 self.skipped,
728 collapsedas,
723 collapsedas,
729 self.keepf,
724 self.keepf,
730 fm=fm,
725 fm=fm,
731 backup=self.backupf,
726 backup=self.backupf,
732 )
727 )
733
728
734 clearstatus(repo)
729 clearstatus(repo)
735 clearcollapsemsg(repo)
730 clearcollapsemsg(repo)
736
731
737 ui.note(_(b"rebase completed\n"))
732 ui.note(_(b"rebase completed\n"))
738 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
733 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
739 if self.skipped:
734 if self.skipped:
740 skippedlen = len(self.skipped)
735 skippedlen = len(self.skipped)
741 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
736 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
742 fm.end()
737 fm.end()
743
738
744 if (
739 if (
745 self.activebookmark
740 self.activebookmark
746 and self.activebookmark in repo._bookmarks
741 and self.activebookmark in repo._bookmarks
747 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
742 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
748 ):
743 ):
749 bookmarks.activate(repo, self.activebookmark)
744 bookmarks.activate(repo, self.activebookmark)
750
745
751 def _abort(self, backup=True, suppwarns=False):
746 def _abort(self, backup=True, suppwarns=False):
752 '''Restore the repository to its original state.'''
747 '''Restore the repository to its original state.'''
753
748
754 repo = self.repo
749 repo = self.repo
755 try:
750 try:
756 # If the first commits in the rebased set get skipped during the
751 # If the first commits in the rebased set get skipped during the
757 # rebase, their values within the state mapping will be the dest
752 # rebase, their values within the state mapping will be the dest
758 # rev id. The rebased list must not contain the dest rev
753 # rev id. The rebased list must not contain the dest rev
759 # (issue4896)
754 # (issue4896)
760 rebased = [
755 rebased = [
761 s
756 s
762 for r, s in self.state.items()
757 for r, s in self.state.items()
763 if s >= 0 and s != r and s != self.destmap[r]
758 if s >= 0 and s != r and s != self.destmap[r]
764 ]
759 ]
765 immutable = [d for d in rebased if not repo[d].mutable()]
760 immutable = [d for d in rebased if not repo[d].mutable()]
766 cleanup = True
761 cleanup = True
767 if immutable:
762 if immutable:
768 repo.ui.warn(
763 repo.ui.warn(
769 _(b"warning: can't clean up public changesets %s\n")
764 _(b"warning: can't clean up public changesets %s\n")
770 % b', '.join(bytes(repo[r]) for r in immutable),
765 % b', '.join(bytes(repo[r]) for r in immutable),
771 hint=_(b"see 'hg help phases' for details"),
766 hint=_(b"see 'hg help phases' for details"),
772 )
767 )
773 cleanup = False
768 cleanup = False
774
769
775 descendants = set()
770 descendants = set()
776 if rebased:
771 if rebased:
777 descendants = set(repo.changelog.descendants(rebased))
772 descendants = set(repo.changelog.descendants(rebased))
778 if descendants - set(rebased):
773 if descendants - set(rebased):
779 repo.ui.warn(
774 repo.ui.warn(
780 _(
775 _(
781 b"warning: new changesets detected on "
776 b"warning: new changesets detected on "
782 b"destination branch, can't strip\n"
777 b"destination branch, can't strip\n"
783 )
778 )
784 )
779 )
785 cleanup = False
780 cleanup = False
786
781
787 if cleanup:
782 if cleanup:
788 if rebased:
783 if rebased:
789 strippoints = [
784 strippoints = [
790 c.node() for c in repo.set(b'roots(%ld)', rebased)
785 c.node() for c in repo.set(b'roots(%ld)', rebased)
791 ]
786 ]
792
787
793 updateifonnodes = set(rebased)
788 updateifonnodes = set(rebased)
794 updateifonnodes.update(self.destmap.values())
789 updateifonnodes.update(self.destmap.values())
795 updateifonnodes.add(self.originalwd)
790 updateifonnodes.add(self.originalwd)
796 shouldupdate = repo[b'.'].rev() in updateifonnodes
791 shouldupdate = repo[b'.'].rev() in updateifonnodes
797
792
798 # Update away from the rebase if necessary
793 # Update away from the rebase if necessary
799 if shouldupdate:
794 if shouldupdate:
800 mergemod.clean_update(repo[self.originalwd])
795 mergemod.clean_update(repo[self.originalwd])
801
796
802 # Strip from the first rebased revision
797 # Strip from the first rebased revision
803 if rebased:
798 if rebased:
804 repair.strip(repo.ui, repo, strippoints, backup=backup)
799 repair.strip(repo.ui, repo, strippoints, backup=backup)
805
800
806 if self.activebookmark and self.activebookmark in repo._bookmarks:
801 if self.activebookmark and self.activebookmark in repo._bookmarks:
807 bookmarks.activate(repo, self.activebookmark)
802 bookmarks.activate(repo, self.activebookmark)
808
803
809 finally:
804 finally:
810 clearstatus(repo)
805 clearstatus(repo)
811 clearcollapsemsg(repo)
806 clearcollapsemsg(repo)
812 if not suppwarns:
807 if not suppwarns:
813 repo.ui.warn(_(b'rebase aborted\n'))
808 repo.ui.warn(_(b'rebase aborted\n'))
814 return 0
809 return 0
815
810
816
811
817 @command(
812 @command(
818 b'rebase',
813 b'rebase',
819 [
814 [
820 (
815 (
821 b's',
816 b's',
822 b'source',
817 b'source',
823 b'',
818 b'',
824 _(b'rebase the specified changeset and descendants'),
819 _(b'rebase the specified changeset and descendants'),
825 _(b'REV'),
820 _(b'REV'),
826 ),
821 ),
827 (
822 (
828 b'b',
823 b'b',
829 b'base',
824 b'base',
830 b'',
825 b'',
831 _(b'rebase everything from branching point of specified changeset'),
826 _(b'rebase everything from branching point of specified changeset'),
832 _(b'REV'),
827 _(b'REV'),
833 ),
828 ),
834 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
829 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
835 (
830 (
836 b'd',
831 b'd',
837 b'dest',
832 b'dest',
838 b'',
833 b'',
839 _(b'rebase onto the specified changeset'),
834 _(b'rebase onto the specified changeset'),
840 _(b'REV'),
835 _(b'REV'),
841 ),
836 ),
842 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
837 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
843 (
838 (
844 b'm',
839 b'm',
845 b'message',
840 b'message',
846 b'',
841 b'',
847 _(b'use text as collapse commit message'),
842 _(b'use text as collapse commit message'),
848 _(b'TEXT'),
843 _(b'TEXT'),
849 ),
844 ),
850 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
845 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
851 (
846 (
852 b'l',
847 b'l',
853 b'logfile',
848 b'logfile',
854 b'',
849 b'',
855 _(b'read collapse commit message from file'),
850 _(b'read collapse commit message from file'),
856 _(b'FILE'),
851 _(b'FILE'),
857 ),
852 ),
858 (b'k', b'keep', False, _(b'keep original changesets')),
853 (b'k', b'keep', False, _(b'keep original changesets')),
859 (b'', b'keepbranches', False, _(b'keep original branch names')),
854 (b'', b'keepbranches', False, _(b'keep original branch names')),
860 (b'D', b'detach', False, _(b'(DEPRECATED)')),
855 (b'D', b'detach', False, _(b'(DEPRECATED)')),
861 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
856 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
862 (b't', b'tool', b'', _(b'specify merge tool')),
857 (b't', b'tool', b'', _(b'specify merge tool')),
863 (b'', b'stop', False, _(b'stop interrupted rebase')),
858 (b'', b'stop', False, _(b'stop interrupted rebase')),
864 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
859 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
865 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
860 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
866 (
861 (
867 b'',
862 b'',
868 b'auto-orphans',
863 b'auto-orphans',
869 b'',
864 b'',
870 _(
865 _(
871 b'automatically rebase orphan revisions '
866 b'automatically rebase orphan revisions '
872 b'in the specified revset (EXPERIMENTAL)'
867 b'in the specified revset (EXPERIMENTAL)'
873 ),
868 ),
874 ),
869 ),
875 ]
870 ]
876 + cmdutil.dryrunopts
871 + cmdutil.dryrunopts
877 + cmdutil.formatteropts
872 + cmdutil.formatteropts
878 + cmdutil.confirmopts,
873 + cmdutil.confirmopts,
879 _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
874 _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
880 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
875 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
881 )
876 )
882 def rebase(ui, repo, **opts):
877 def rebase(ui, repo, **opts):
883 """move changeset (and descendants) to a different branch
878 """move changeset (and descendants) to a different branch
884
879
885 Rebase uses repeated merging to graft changesets from one part of
880 Rebase uses repeated merging to graft changesets from one part of
886 history (the source) onto another (the destination). This can be
881 history (the source) onto another (the destination). This can be
887 useful for linearizing *local* changes relative to a master
882 useful for linearizing *local* changes relative to a master
888 development tree.
883 development tree.
889
884
890 Published commits cannot be rebased (see :hg:`help phases`).
885 Published commits cannot be rebased (see :hg:`help phases`).
891 To copy commits, see :hg:`help graft`.
886 To copy commits, see :hg:`help graft`.
892
887
893 If you don't specify a destination changeset (``-d/--dest``), rebase
888 If you don't specify a destination changeset (``-d/--dest``), rebase
894 will use the same logic as :hg:`merge` to pick a destination. If
889 will use the same logic as :hg:`merge` to pick a destination. If
895 the current branch contains exactly one other head, the other head
890 the current branch contains exactly one other head, the other head
896 is merged with by default. Otherwise, an explicit revision with
891 is merged with by default. Otherwise, an explicit revision with
897 which to merge must be provided. (destination changeset is not
892 which to merge must be provided. (destination changeset is not
898 modified by rebasing, but new changesets are added as its
893 modified by rebasing, but new changesets are added as its
899 descendants.)
894 descendants.)
900
895
901 Here are the ways to select changesets:
896 Here are the ways to select changesets:
902
897
903 1. Explicitly select them using ``--rev``.
898 1. Explicitly select them using ``--rev``.
904
899
905 2. Use ``--source`` to select a root changeset and include all of its
900 2. Use ``--source`` to select a root changeset and include all of its
906 descendants.
901 descendants.
907
902
    3. Use ``--base`` to select a changeset; rebase will find ancestors
    and their descendants which are not also ancestors of the destination.

    4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
    rebase will use ``--base .`` as above.

    If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
    can be used in ``--dest``. The destination will be calculated per source
    revision, with ``SRC`` substituted by that single source revision and
    ``ALLSRC`` substituted by all source revisions.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you use ``--keep``).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``, see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted; there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c, aborted with --abort/-a, or stopped with
    --stop.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      - stabilize orphaned changesets so history looks linear::

          hg rebase -r 'orphan()-obsolete()'\
            -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
            max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'

      Configuration Options:

      You can make rebase require a destination if you set the following config
      option::

        [commands]
        rebase.requiredest = True

      By default, rebase will close the transaction after each commit. For
      performance purposes, you can configure rebase to use a single transaction
      across the entire rebase. WARNING: This setting introduces a significant
      risk of losing the work you've done in a rebase if the rebase aborts
      unexpectedly::

        [rebase]
        singletransaction = True

      By default, rebase writes to the working copy, but you can configure it to
      run in-memory for better performance. When the rebase is not moving the
      parent(s) of the working copy (AKA the "currently checked out changesets"),
      this may also allow it to run even if the working copy is dirty::

        [rebase]
        experimental.inmemory = True

      Return Values:

      Returns 0 on success, 1 if nothing to rebase or there are
      unresolved conflicts.

    """
    opts = pycompat.byteskwargs(opts)
    inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
    action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
    if action:
        cmdutil.check_incompatible_arguments(
            opts, action, [b'confirm', b'dry_run']
        )
        cmdutil.check_incompatible_arguments(
            opts, action, [b'rev', b'source', b'base', b'dest']
        )
    cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
    cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')

    if action or repo.currenttransaction() is not None:
        # in-memory rebase is not compatible with resuming rebases.
        # (Or if it is run within a transaction, since the restart logic can
        # fail the entire transaction.)
        inmemory = False

    if opts.get(b'auto_orphans'):
        disallowed_opts = set(opts) - {b'auto_orphans'}
        cmdutil.check_incompatible_arguments(
            opts, b'auto_orphans', disallowed_opts
        )

        userrevs = list(repo.revs(opts.get(b'auto_orphans')))
        opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
        opts[b'dest'] = b'_destautoorphanrebase(SRC)'

    if opts.get(b'dry_run') or opts.get(b'confirm'):
        return _dryrunrebase(ui, repo, action, opts)
    elif action == b'stop':
        rbsrt = rebaseruntime(repo, ui)
        with repo.wlock(), repo.lock():
            rbsrt.restorestatus()
            if rbsrt.collapsef:
                raise error.Abort(_(b"cannot stop in --collapse session"))
            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if not (rbsrt.keepf or allowunstable):
                raise error.Abort(
                    _(
                        b"cannot remove original changesets with"
                        b" unrebased descendants"
                    ),
                    hint=_(
                        b'either enable obsmarkers to allow unstable '
                        b'revisions or use --keep to keep original '
                        b'changesets'
                    ),
                )
            # update to the current working revision
            # to clear interrupted merge
            hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
            rbsrt._finishrebase()
            return 0
    elif inmemory:
        try:
            # in-memory merge doesn't support conflicts, so if we hit any, abort
            # and re-run as an on-disk merge.
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
        except error.InMemoryMergeConflictsError:
            ui.warn(
                _(
                    b'hit merge conflicts; re-running rebase without in-memory'
                    b' merge\n'
                )
            )
            # TODO: Make in-memory merge not use the on-disk merge state, so
            # we don't have to clean it here
            mergemod.mergestate.clean(repo)
            clearstatus(repo)
            clearcollapsemsg(repo)
            return _dorebase(ui, repo, action, opts, inmemory=False)
    else:
        return _dorebase(ui, repo, action, opts)
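
# Editor's illustrative note (not part of the original module): the in-memory
# branch above transparently retries on disk when it hits a conflict, so a user
# who has enabled the config shown in the command help, e.g.
#     [rebase]
#     experimental.inmemory = True
# still ends up with an ordinary on-disk rebase (after the merge state, status
# file and collapse message are cleaned) rather than an error.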


def _dryrunrebase(ui, repo, action, opts):
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True, backup=False, suppwarns=True
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True, backup=False, suppwarns=True
                )


def _dorebase(ui, repo, action, opts, inmemory=False):
    rbsrt = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)


def _origrebase(
    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
):
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        if opts.get(b'interactive'):
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', None),
                opts.get(b'base', None),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()


def _definedestmap(
    ui,
    repo,
    inmemory,
    destf=None,
    srcf=None,
    basef=None,
    revf=None,
    destspace=None,
):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs(b'(%ld)::', src) or src
    else:
        base = scmutil.revrange(repo, [basef or b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be to abort with a "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    if nodemod.wdirrev in rebaseset:
        raise error.Abort(_(b'cannot rebase the working copy'))
    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap
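
# Editor's illustrative note (not part of the original module): the value
# returned above is a plain {srcrev: destrev} mapping. For a single-destination
# run such as `hg rebase -r 5+6 -d 9` it would look like
#     {5: 9, 6: 9}
# while a hypothetical multi-destination spec such as --dest 'SRC~1' is
# resolved once per source revision via repo.anyrevs(), giving each key its
# own destination.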


def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if p.rev() not in state and p.rev() not in destancestors:
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
    )
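
# Editor's illustrative note (not part of the original module): with a state
# whose keys cover revisions {2, 3}, where 3 is a merge whose second parent 5
# lies outside both the collapsed set and the destination's ancestors,
#
#   3        <- merge being collapsed
#   |\
#   2 5      <- 2 is in state; 5 is the external parent
#   |
#   1        <- ancestor of the destination
#
# externalparent(repo, state, destancestors) would return 5, so the collapsed
# commit keeps that outside branch as its second parent.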


def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # Replicates the empty check in ``repo.commit``.
    if wctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
        return None

    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = None
    if b'branch' in extra:
        branch = extra[b'branch']

    memctx = wctx.tomemctx(
        commitmsg,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres


def commitnode(repo, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool(b'rebase', b'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode
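
# Editor's illustrative note (not part of the original module): the dirstate
# guard above is only created when each node gets its own transaction. With the
# configuration shown in the command help,
#     [rebase]
#     singletransaction = True
# the single transaction (and dirstateguard) opened in _origrebase() already
# protects the dirstate, so repo.commit() here runs under a null context
# manager instead.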


def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor"""
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.clean_update(p1ctx)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    wctx.setparents(p1ctx.node(), repo[p2].node())
    if collapse:
        copies.graftcopies(wctx, ctx, repo[dest])
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())
    return stats


def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when rebasing B+E to F and C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
         |
         F <- original destination of B, E
         |
         | E <- rev, which is being rebased
         | |
         | D <- prev, one parent of rev being checked
         | |
         | x <- skipped, ex. no successor or successor in (::dest)
         | |
         | C <- rebased as C', different destination
         | |
         | B <- rebased as B1     C'
         |/                       |
         A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

         H       C1 G1
        /|       | /
       F G       |/
     K | |  ->   K
     | C D       |
     | |/        |
     | B         | ...
     |/          |/
     A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

        C'     After rebasing C, when considering B's destination, use C'
        |      instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result
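
# Editor's illustrative note (not part of the original module): using the first
# docstring example above, once B has been rebased to B1 (so state[B] == B1,
# destmap[E] == F, and x is in `skipped`), a call along the lines of
#     adjustdest(repo, E, destmap, state, skipped)
# returns [B1, F]: E's real parent chain leads back to B, whose rebased result
# B1 replaces the original destination F, while the absent second parent keeps
# the unadjusted destination.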


def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped

    if divergencebasecandidates and not divergenceok:
        divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
        msg = _(b"this rebase will cause divergences from: %s")
        h = _(
            b"to force the rebase please set "
            b"experimental.evolution.allowdivergence=True"
        )
        raise error.Abort(msg % (b",".join(divhashes),), hint=h)


def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    assert unfi.filtername is None
    get_rev = unfi.changelog.index.get_rev
    for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
        r = get_rev(s)
        if r is not None:
            yield r
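
# Editor's illustrative note (not part of the original module): a caller would
# typically drain the generator, e.g.
#     succs = list(successorrevs(repo.unfiltered(), rev))
# Successor nodes that are not present in the local repository are silently
# skipped, because index.get_rev() returns None for unknown nodes.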


def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    The destination specified by "dest" cannot always be used directly because
    a previous rebase result could affect the destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge are slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev)  # old parents
    newps = [nullrev, nullrev]  # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps)  # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendant of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries not to rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p  # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x):  # CASE-1
                    np = nullrev
                elif isancestor(x, np):  # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendant of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(
                _(
                    b'cannot rebase %d:%s without '
                    b'moving at least one of its parents'
                )
                % (rev, repo[rev])
            )

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_(b'source is ancestor of destination'))

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev or base in newps:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            other_bases = set(bases) - {base}
            siderevs = list(
                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        if any(revs is not None for revs in unwanted):
            # Choose a merge base that has a minimal number of unwanted revs.
            l, i = min(
                (len(revs), i)
                for i, revs in enumerate(unwanted)
                if revs is not None
            )

            # The merge will include unwanted revisions. Abort now. Revisit this if
            # we have a more advanced merge algorithm that handles multiple bases.
            if l > 0:
                unwanteddesc = _(b' or ').join(
                    (
                        b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                        for revs in unwanted
                        if revs is not None
                    )
                )
                raise error.Abort(
                    _(b'rebasing %d:%s will include unwanted changes from %s')
                    % (rev, repo[rev], unwanteddesc)
                )

            # newps[0] should match merge base if possible. Currently, if newps[i]
            # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
            # the other's ancestor. In that case, it's fine to not swap newps here.
            # (see CASE-1 and CASE-2 above)
            if i != 0:
                if newps[i] != nullrev:
                    newps[0], newps[i] = newps[i], newps[0]
                bases[0], bases[i] = bases[i], bases[0]

    # "rebasenode" updates to new p1, use the corresponding merge base.
    base = bases[0]

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
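
# Editor's illustrative note (not part of the original module): for the
# docstring example above ("rebase -r C+D+E -d B"), once C has been rebased to
# C' (state[C] == C'), a call along the lines of
#     p1, p2, base = defineparents(repo, D, destmap, state, skipped, obsskipped)
# yields p1 == C' (D's parent follows its rebased result), p2 == nullrev since
# D is not a merge, and base == C (the old p1), which rebasenode() then uses as
# the merge ancestor.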


def isagitpatch(repo, patchname):
    """Return true if the given patch is in git format"""
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, b'rb')):
        if line.startswith(b'diff --git'):
            return True
    return False
1810
1805
1811
1806
1812 def updatemq(repo, state, skipped, **opts):
1807 def updatemq(repo, state, skipped, **opts):
1813 """Update rebased mq patches - finalize and then import them"""
1808 """Update rebased mq patches - finalize and then import them"""
1814 mqrebase = {}
1809 mqrebase = {}
1815 mq = repo.mq
1810 mq = repo.mq
1816 original_series = mq.fullseries[:]
1811 original_series = mq.fullseries[:]
1817 skippedpatches = set()
1812 skippedpatches = set()
1818
1813
1819 for p in mq.applied:
1814 for p in mq.applied:
1820 rev = repo[p.node].rev()
1815 rev = repo[p.node].rev()
1821 if rev in state:
1816 if rev in state:
1822 repo.ui.debug(
1817 repo.ui.debug(
1823 b'revision %d is an mq patch (%s), finalize it.\n'
1818 b'revision %d is an mq patch (%s), finalize it.\n'
1824 % (rev, p.name)
1819 % (rev, p.name)
1825 )
1820 )
1826 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
1821 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
1827 else:
1822 else:
1828 # Applied but not rebased, not sure this should happen
1823 # Applied but not rebased, not sure this should happen
1829 skippedpatches.add(p.name)
1824 skippedpatches.add(p.name)
1830
1825
1831 if mqrebase:
1826 if mqrebase:
1832 mq.finish(repo, mqrebase.keys())
1827 mq.finish(repo, mqrebase.keys())
1833
1828
1834 # We must start import from the newest revision
1829 # We must start import from the newest revision
1835 for rev in sorted(mqrebase, reverse=True):
1830 for rev in sorted(mqrebase, reverse=True):
1836 if rev not in skipped:
1831 if rev not in skipped:
1837 name, isgit = mqrebase[rev]
1832 name, isgit = mqrebase[rev]
1838 repo.ui.note(
1833 repo.ui.note(
1839 _(b'updating mq patch %s to %d:%s\n')
1834 _(b'updating mq patch %s to %d:%s\n')
1840 % (name, state[rev], repo[state[rev]])
1835 % (name, state[rev], repo[state[rev]])
1841 )
1836 )
1842 mq.qimport(
1837 mq.qimport(
1843 repo,
1838 repo,
1844 (),
1839 (),
1845 patchname=name,
1840 patchname=name,
1846 git=isgit,
1841 git=isgit,
1847 rev=[b"%d" % state[rev]],
1842 rev=[b"%d" % state[rev]],
1848 )
1843 )
1849 else:
1844 else:
1850 # Rebased and skipped
1845 # Rebased and skipped
1851 skippedpatches.add(mqrebase[rev][0])
1846 skippedpatches.add(mqrebase[rev][0])
1852
1847
1853 # Patches were either applied and rebased and imported in
1848 # Patches were either applied and rebased and imported in
1854 # order, applied and removed or unapplied. Discard the removed
1849 # order, applied and removed or unapplied. Discard the removed
1855 # ones while preserving the original series order and guards.
1850 # ones while preserving the original series order and guards.
1856 newseries = [
1851 newseries = [
1857 s
1852 s
1858 for s in original_series
1853 for s in original_series
1859 if mq.guard_re.split(s, 1)[0] not in skippedpatches
1854 if mq.guard_re.split(s, 1)[0] not in skippedpatches
1860 ]
1855 ]
1861 mq.fullseries[:] = newseries
1856 mq.fullseries[:] = newseries
1862 mq.seriesdirty = True
1857 mq.seriesdirty = True
1863 mq.savedirty()
1858 mq.savedirty()
1864
1859
1865
1860
1866 def storecollapsemsg(repo, collapsemsg):
1861 def storecollapsemsg(repo, collapsemsg):
1867 """Store the collapse message to allow recovery"""
1862 """Store the collapse message to allow recovery"""
1868 collapsemsg = collapsemsg or b''
1863 collapsemsg = collapsemsg or b''
1869 f = repo.vfs(b"last-message.txt", b"w")
1864 f = repo.vfs(b"last-message.txt", b"w")
1870 f.write(b"%s\n" % collapsemsg)
1865 f.write(b"%s\n" % collapsemsg)
1871 f.close()
1866 f.close()
1872
1867
1873
1868
1874 def clearcollapsemsg(repo):
1869 def clearcollapsemsg(repo):
1875 """Remove collapse message file"""
1870 """Remove collapse message file"""
1876 repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1871 repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1877
1872
1878
1873
1879 def restorecollapsemsg(repo, isabort):
1874 def restorecollapsemsg(repo, isabort):
1880 """Restore previously stored collapse message"""
1875 """Restore previously stored collapse message"""
1881 try:
1876 try:
1882 f = repo.vfs(b"last-message.txt")
1877 f = repo.vfs(b"last-message.txt")
1883 collapsemsg = f.readline().strip()
1878 collapsemsg = f.readline().strip()
1884 f.close()
1879 f.close()
1885 except IOError as err:
1880 except IOError as err:
1886 if err.errno != errno.ENOENT:
1881 if err.errno != errno.ENOENT:
1887 raise
1882 raise
1888 if isabort:
1883 if isabort:
1889 # Oh well, just abort like normal
1884 # Oh well, just abort like normal
1890 collapsemsg = b''
1885 collapsemsg = b''
1891 else:
1886 else:
1892 raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
1887 raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
1893 return collapsemsg
1888 return collapsemsg
1894
1889
1895
1890
1896 def clearstatus(repo):
1891 def clearstatus(repo):
1897 """Remove the status files"""
1892 """Remove the status files"""
1898 # Make sure the active transaction won't write the state file
1893 # Make sure the active transaction won't write the state file
1899 tr = repo.currenttransaction()
1894 tr = repo.currenttransaction()
1900 if tr:
1895 if tr:
1901 tr.removefilegenerator(b'rebasestate')
1896 tr.removefilegenerator(b'rebasestate')
1902 repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1897 repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1903
1898
1904
1899
1905 def sortsource(destmap):
1900 def sortsource(destmap):
1906 """yield source revisions in an order such that we only rebase things once
1901 """yield source revisions in an order such that we only rebase things once
1907
1902
1908 If source and destination overlap, we should filter out revisions
1903 If source and destination overlap, we should filter out revisions
1909 depending on other revisions which haven't been rebased yet.
1904 depending on other revisions which haven't been rebased yet.
1910
1905
1911 Yield a sorted list of revisions each time.
1906 Yield a sorted list of revisions each time.
1912
1907
1913 For example, when rebasing A to B and B to C, this function yields [B], then
1908 For example, when rebasing A to B and B to C, this function yields [B], then
1914 [A], indicating B needs to be rebased first.
1909 [A], indicating B needs to be rebased first.
1915
1910
1916 Raise if there is a cycle so the rebase is impossible.
1911 Raise if there is a cycle so the rebase is impossible.
1917 """
1912 """
1918 srcset = set(destmap)
1913 srcset = set(destmap)
1919 while srcset:
1914 while srcset:
1920 srclist = sorted(srcset)
1915 srclist = sorted(srcset)
1921 result = []
1916 result = []
1922 for r in srclist:
1917 for r in srclist:
1923 if destmap[r] not in srcset:
1918 if destmap[r] not in srcset:
1924 result.append(r)
1919 result.append(r)
1925 if not result:
1920 if not result:
1926 raise error.Abort(_(b'source and destination form a cycle'))
1921 raise error.Abort(_(b'source and destination form a cycle'))
1927 srcset -= set(result)
1922 srcset -= set(result)
1928 yield result
1923 yield result
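The batching described in the docstring above is easy to see in isolation. Below is a minimal standalone sketch of the same idea, using a plain {source: destination} dict rather than Mercurial revision numbers; the name toposort_batches is made up for illustration and is not part of this module.

def toposort_batches(destmap):
    # srcset holds the revisions that still need to move
    srcset = set(destmap)
    while srcset:
        # a source is ready once its destination is not itself waiting to move
        batch = sorted(r for r in srcset if destmap[r] not in srcset)
        if not batch:
            raise ValueError('source and destination form a cycle')
        srcset -= set(batch)
        yield batch

# destmap {A: B, B: C} yields [B] first, then [A], as in the docstring example
print(list(toposort_batches({'A': 'B', 'B': 'C'})))  # [['B'], ['A']]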
1929
1924
1930
1925
1931 def buildstate(repo, destmap, collapse):
1926 def buildstate(repo, destmap, collapse):
1932 '''Define which revisions are going to be rebased and where
1927 '''Define which revisions are going to be rebased and where
1933
1928
1934 repo: repo
1929 repo: repo
1935 destmap: {srcrev: destrev}
1930 destmap: {srcrev: destrev}
1936 '''
1931 '''
1937 rebaseset = destmap.keys()
1932 rebaseset = destmap.keys()
1938 originalwd = repo[b'.'].rev()
1933 originalwd = repo[b'.'].rev()
1939
1934
1940 # This check isn't strictly necessary, since mq detects commits over an
1935 # This check isn't strictly necessary, since mq detects commits over an
1941 # applied patch. But it prevents messing up the working directory when
1936 # applied patch. But it prevents messing up the working directory when
1942 # a partially completed rebase is blocked by mq.
1937 # a partially completed rebase is blocked by mq.
1943 if b'qtip' in repo.tags():
1938 if b'qtip' in repo.tags():
1944 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1939 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1945 if set(destmap.values()) & mqapplied:
1940 if set(destmap.values()) & mqapplied:
1946 raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
1941 raise error.Abort(_(b'cannot rebase onto an applied mq patch'))
1947
1942
1948 # Get "cycle" error early by exhausting the generator.
1943 # Get "cycle" error early by exhausting the generator.
1949 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1944 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1950 if not sortedsrc:
1945 if not sortedsrc:
1951 raise error.Abort(_(b'no matching revisions'))
1946 raise error.Abort(_(b'no matching revisions'))
1952
1947
1953 # Only check the first batch of revisions to rebase, i.e. those not depending
1948 # Only check the first batch of revisions to rebase, i.e. those not depending
1954 # on other revisions in the rebase set. This means the "source is ancestor of
1949 # on other revisions in the rebase set. This means the "source is ancestor of
1955 # destination" check for the second (and following) batches of revisions is
1950 # destination" check for the second (and following) batches of revisions is
1956 # not done here. We rely on "defineparents" to do that check.
1951 # not done here. We rely on "defineparents" to do that check.
1957 roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
1952 roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
1958 if not roots:
1953 if not roots:
1959 raise error.Abort(_(b'no matching revisions'))
1954 raise error.Abort(_(b'no matching revisions'))
1960
1955
1961 def revof(r):
1956 def revof(r):
1962 return r.rev()
1957 return r.rev()
1963
1958
1964 roots = sorted(roots, key=revof)
1959 roots = sorted(roots, key=revof)
1965 state = dict.fromkeys(rebaseset, revtodo)
1960 state = dict.fromkeys(rebaseset, revtodo)
1966 emptyrebase = len(sortedsrc) == 1
1961 emptyrebase = len(sortedsrc) == 1
1967 for root in roots:
1962 for root in roots:
1968 dest = repo[destmap[root.rev()]]
1963 dest = repo[destmap[root.rev()]]
1969 commonbase = root.ancestor(dest)
1964 commonbase = root.ancestor(dest)
1970 if commonbase == root:
1965 if commonbase == root:
1971 raise error.Abort(_(b'source is ancestor of destination'))
1966 raise error.Abort(_(b'source is ancestor of destination'))
1972 if commonbase == dest:
1967 if commonbase == dest:
1973 wctx = repo[None]
1968 wctx = repo[None]
1974 if dest == wctx.p1():
1969 if dest == wctx.p1():
1975 # when rebasing to '.', it will use the current wd branch name
1970 # when rebasing to '.', it will use the current wd branch name
1976 samebranch = root.branch() == wctx.branch()
1971 samebranch = root.branch() == wctx.branch()
1977 else:
1972 else:
1978 samebranch = root.branch() == dest.branch()
1973 samebranch = root.branch() == dest.branch()
1979 if not collapse and samebranch and dest in root.parents():
1974 if not collapse and samebranch and dest in root.parents():
1980 # mark the revision as done by setting its new revision
1975 # mark the revision as done by setting its new revision
1981 # equal to its old (current) revisions
1976 # equal to its old (current) revisions
1982 state[root.rev()] = root.rev()
1977 state[root.rev()] = root.rev()
1983 repo.ui.debug(b'source is a child of destination\n')
1978 repo.ui.debug(b'source is a child of destination\n')
1984 continue
1979 continue
1985
1980
1986 emptyrebase = False
1981 emptyrebase = False
1987 repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
1982 repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
1988 if emptyrebase:
1983 if emptyrebase:
1989 return None
1984 return None
1990 for rev in sorted(state):
1985 for rev in sorted(state):
1991 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1986 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1992 # if all parents of this revision are done, then so is this revision
1987 # if all parents of this revision are done, then so is this revision
1993 if parents and all((state.get(p) == p for p in parents)):
1988 if parents and all((state.get(p) == p for p in parents)):
1994 state[rev] = rev
1989 state[rev] = rev
1995 return originalwd, destmap, state
1990 return originalwd, destmap, state
1996
1991
1997
1992
1998 def clearrebased(
1993 def clearrebased(
1999 ui,
1994 ui,
2000 repo,
1995 repo,
2001 destmap,
1996 destmap,
2002 state,
1997 state,
2003 skipped,
1998 skipped,
2004 collapsedas=None,
1999 collapsedas=None,
2005 keepf=False,
2000 keepf=False,
2006 fm=None,
2001 fm=None,
2007 backup=True,
2002 backup=True,
2008 ):
2003 ):
2009 """dispose of rebased revisions at the end of the rebase
2004 """dispose of rebased revisions at the end of the rebase
2010
2005
2011 If `collapsedas` is not None, the rebase was a collapse whose result is the
2006 If `collapsedas` is not None, the rebase was a collapse whose result is the
2012 `collapsedas` node.
2007 `collapsedas` node.
2013
2008
2014 If `keepf` is True, the rebase has --keep set and no nodes should be
2009 If `keepf` is True, the rebase has --keep set and no nodes should be
2015 removed (but bookmarks still need to be moved).
2010 removed (but bookmarks still need to be moved).
2016
2011
2017 If `backup` is False, no backup will be stored when stripping rebased
2012 If `backup` is False, no backup will be stored when stripping rebased
2018 revisions.
2013 revisions.
2019 """
2014 """
2020 tonode = repo.changelog.node
2015 tonode = repo.changelog.node
2021 replacements = {}
2016 replacements = {}
2022 moves = {}
2017 moves = {}
2023 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2018 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2024
2019
2025 collapsednodes = []
2020 collapsednodes = []
2026 for rev, newrev in sorted(state.items()):
2021 for rev, newrev in sorted(state.items()):
2027 if newrev >= 0 and newrev != rev:
2022 if newrev >= 0 and newrev != rev:
2028 oldnode = tonode(rev)
2023 oldnode = tonode(rev)
2029 newnode = collapsedas or tonode(newrev)
2024 newnode = collapsedas or tonode(newrev)
2030 moves[oldnode] = newnode
2025 moves[oldnode] = newnode
2031 succs = None
2026 succs = None
2032 if rev in skipped:
2027 if rev in skipped:
2033 if stripcleanup or not repo[rev].obsolete():
2028 if stripcleanup or not repo[rev].obsolete():
2034 succs = ()
2029 succs = ()
2035 elif collapsedas:
2030 elif collapsedas:
2036 collapsednodes.append(oldnode)
2031 collapsednodes.append(oldnode)
2037 else:
2032 else:
2038 succs = (newnode,)
2033 succs = (newnode,)
2039 if succs is not None:
2034 if succs is not None:
2040 replacements[(oldnode,)] = succs
2035 replacements[(oldnode,)] = succs
2041 if collapsednodes:
2036 if collapsednodes:
2042 replacements[tuple(collapsednodes)] = (collapsedas,)
2037 replacements[tuple(collapsednodes)] = (collapsedas,)
2043 if fm:
2038 if fm:
2044 hf = fm.hexfunc
2039 hf = fm.hexfunc
2045 fl = fm.formatlist
2040 fl = fm.formatlist
2046 fd = fm.formatdict
2041 fd = fm.formatdict
2047 changes = {}
2042 changes = {}
2048 for oldns, newn in pycompat.iteritems(replacements):
2043 for oldns, newn in pycompat.iteritems(replacements):
2049 for oldn in oldns:
2044 for oldn in oldns:
2050 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2045 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2051 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2046 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2052 fm.data(nodechanges=nodechanges)
2047 fm.data(nodechanges=nodechanges)
2053 if keepf:
2048 if keepf:
2054 replacements = {}
2049 replacements = {}
2055 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2050 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2056
2051
2057
2052
2058 def pullrebase(orig, ui, repo, *args, **opts):
2053 def pullrebase(orig, ui, repo, *args, **opts):
2059 """Call rebase after pull if the latter has been invoked with --rebase"""
2054 """Call rebase after pull if the latter has been invoked with --rebase"""
2060 if opts.get('rebase'):
2055 if opts.get('rebase'):
2061 if ui.configbool(b'commands', b'rebase.requiredest'):
2056 if ui.configbool(b'commands', b'rebase.requiredest'):
2062 msg = _(b'rebase destination required by configuration')
2057 msg = _(b'rebase destination required by configuration')
2063 hint = _(b'use hg pull followed by hg rebase -d DEST')
2058 hint = _(b'use hg pull followed by hg rebase -d DEST')
2064 raise error.Abort(msg, hint=hint)
2059 raise error.Abort(msg, hint=hint)
2065
2060
2066 with repo.wlock(), repo.lock():
2061 with repo.wlock(), repo.lock():
2067 if opts.get('update'):
2062 if opts.get('update'):
2068 del opts['update']
2063 del opts['update']
2069 ui.debug(
2064 ui.debug(
2070 b'--update and --rebase are not compatible, ignoring '
2065 b'--update and --rebase are not compatible, ignoring '
2071 b'the update flag\n'
2066 b'the update flag\n'
2072 )
2067 )
2073
2068
2074 cmdutil.checkunfinished(repo, skipmerge=True)
2069 cmdutil.checkunfinished(repo, skipmerge=True)
2075 cmdutil.bailifchanged(
2070 cmdutil.bailifchanged(
2076 repo,
2071 repo,
2077 hint=_(
2072 hint=_(
2078 b'cannot pull with rebase: '
2073 b'cannot pull with rebase: '
2079 b'please commit or shelve your changes first'
2074 b'please commit or shelve your changes first'
2080 ),
2075 ),
2081 )
2076 )
2082
2077
2083 revsprepull = len(repo)
2078 revsprepull = len(repo)
2084 origpostincoming = commands.postincoming
2079 origpostincoming = commands.postincoming
2085
2080
2086 def _dummy(*args, **kwargs):
2081 def _dummy(*args, **kwargs):
2087 pass
2082 pass
2088
2083
2089 commands.postincoming = _dummy
2084 commands.postincoming = _dummy
2090 try:
2085 try:
2091 ret = orig(ui, repo, *args, **opts)
2086 ret = orig(ui, repo, *args, **opts)
2092 finally:
2087 finally:
2093 commands.postincoming = origpostincoming
2088 commands.postincoming = origpostincoming
2094 revspostpull = len(repo)
2089 revspostpull = len(repo)
2095 if revspostpull > revsprepull:
2090 if revspostpull > revsprepull:
2096 # --rev option from pull conflicts with rebase's own --rev
2091 # --rev option from pull conflicts with rebase's own --rev
2097 # dropping it
2092 # dropping it
2098 if 'rev' in opts:
2093 if 'rev' in opts:
2099 del opts['rev']
2094 del opts['rev']
2100 # positional argument from pull conflicts with rebase's own
2095 # positional argument from pull conflicts with rebase's own
2101 # --source.
2096 # --source.
2102 if 'source' in opts:
2097 if 'source' in opts:
2103 del opts['source']
2098 del opts['source']
2104 # revsprepull is the length of the repo, not the revnum of tip.
2099 # revsprepull is the length of the repo, not the revnum of tip.
2105 destspace = list(repo.changelog.revs(start=revsprepull))
2100 destspace = list(repo.changelog.revs(start=revsprepull))
2106 opts['_destspace'] = destspace
2101 opts['_destspace'] = destspace
2107 try:
2102 try:
2108 rebase(ui, repo, **opts)
2103 rebase(ui, repo, **opts)
2109 except error.NoMergeDestAbort:
2104 except error.NoMergeDestAbort:
2110 # we can maybe update instead
2105 # we can maybe update instead
2111 rev, _a, _b = destutil.destupdate(repo)
2106 rev, _a, _b = destutil.destupdate(repo)
2112 if rev == repo[b'.'].rev():
2107 if rev == repo[b'.'].rev():
2113 ui.status(_(b'nothing to rebase\n'))
2108 ui.status(_(b'nothing to rebase\n'))
2114 else:
2109 else:
2115 ui.status(_(b'nothing to rebase - updating instead\n'))
2110 ui.status(_(b'nothing to rebase - updating instead\n'))
2116 # not passing argument to get the bare update behavior
2111 # not passing argument to get the bare update behavior
2117 # with warning and trumpets
2112 # with warning and trumpets
2118 commands.update(ui, repo)
2113 commands.update(ui, repo)
2119 else:
2114 else:
2120 if opts.get('tool'):
2115 if opts.get('tool'):
2121 raise error.Abort(_(b'--tool can only be used with --rebase'))
2116 raise error.Abort(_(b'--tool can only be used with --rebase'))
2122 ret = orig(ui, repo, *args, **opts)
2117 ret = orig(ui, repo, *args, **opts)
2123
2118
2124 return ret
2119 return ret
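As a side note on the `commands.rebase.requiredest` check near the top of this wrapper: it is a regular configuration option, so the abort path above can be triggered by something like the following hgrc fragment (shown purely as an illustration):

[commands]
rebase.requiredest = True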
2125
2120
2126
2121
2127 def _filterobsoleterevs(repo, revs):
2122 def _filterobsoleterevs(repo, revs):
2128 """returns a set of the obsolete revisions in revs"""
2123 """returns a set of the obsolete revisions in revs"""
2129 return set(r for r in revs if repo[r].obsolete())
2124 return set(r for r in revs if repo[r].obsolete())
2130
2125
2131
2126
2132 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2127 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2133 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2128 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2134
2129
2135 `obsoletenotrebased` is a mapping of obsolete => successor for all
2130 `obsoletenotrebased` is a mapping of obsolete => successor for all
2136 obsolete nodes to be rebased given in `rebaseobsrevs`.
2131 obsolete nodes to be rebased given in `rebaseobsrevs`.
2137
2132
2138 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
2133 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
2139 without a successor in destination.
2134 without a successor in destination.
2140
2135
2141 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2136 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2142 obsolete successors.
2137 obsolete successors.
2143 """
2138 """
2144 obsoletenotrebased = {}
2139 obsoletenotrebased = {}
2145 obsoletewithoutsuccessorindestination = set()
2140 obsoletewithoutsuccessorindestination = set()
2146 obsoleteextinctsuccessors = set()
2141 obsoleteextinctsuccessors = set()
2147
2142
2148 assert repo.filtername is None
2143 assert repo.filtername is None
2149 cl = repo.changelog
2144 cl = repo.changelog
2150 get_rev = cl.index.get_rev
2145 get_rev = cl.index.get_rev
2151 extinctrevs = set(repo.revs(b'extinct()'))
2146 extinctrevs = set(repo.revs(b'extinct()'))
2152 for srcrev in rebaseobsrevs:
2147 for srcrev in rebaseobsrevs:
2153 srcnode = cl.node(srcrev)
2148 srcnode = cl.node(srcrev)
2154 # XXX: more advanced APIs are required to handle split correctly
2149 # XXX: more advanced APIs are required to handle split correctly
2155 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2150 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2156 # obsutil.allsuccessors includes node itself
2151 # obsutil.allsuccessors includes node itself
2157 successors.remove(srcnode)
2152 successors.remove(srcnode)
2158 succrevs = {get_rev(s) for s in successors}
2153 succrevs = {get_rev(s) for s in successors}
2159 succrevs.discard(None)
2154 succrevs.discard(None)
2160 if succrevs.issubset(extinctrevs):
2155 if succrevs.issubset(extinctrevs):
2161 # all successors are extinct
2156 # all successors are extinct
2162 obsoleteextinctsuccessors.add(srcrev)
2157 obsoleteextinctsuccessors.add(srcrev)
2163 if not successors:
2158 if not successors:
2164 # no successor
2159 # no successor
2165 obsoletenotrebased[srcrev] = None
2160 obsoletenotrebased[srcrev] = None
2166 else:
2161 else:
2167 dstrev = destmap[srcrev]
2162 dstrev = destmap[srcrev]
2168 for succrev in succrevs:
2163 for succrev in succrevs:
2169 if cl.isancestorrev(succrev, dstrev):
2164 if cl.isancestorrev(succrev, dstrev):
2170 obsoletenotrebased[srcrev] = succrev
2165 obsoletenotrebased[srcrev] = succrev
2171 break
2166 break
2172 else:
2167 else:
2173 # If 'srcrev' has a successor in rebase set but none in
2168 # If 'srcrev' has a successor in rebase set but none in
2174 # destination (which would be caught above), we shall skip it
2169 # destination (which would be caught above), we shall skip it
2175 # and its descendants to avoid divergence.
2170 # and its descendants to avoid divergence.
2176 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2171 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2177 obsoletewithoutsuccessorindestination.add(srcrev)
2172 obsoletewithoutsuccessorindestination.add(srcrev)
2178
2173
2179 return (
2174 return (
2180 obsoletenotrebased,
2175 obsoletenotrebased,
2181 obsoletewithoutsuccessorindestination,
2176 obsoletewithoutsuccessorindestination,
2182 obsoleteextinctsuccessors,
2177 obsoleteextinctsuccessors,
2183 )
2178 )
2184
2179
2185
2180
2186 def abortrebase(ui, repo):
2181 def abortrebase(ui, repo):
2187 with repo.wlock(), repo.lock():
2182 with repo.wlock(), repo.lock():
2188 rbsrt = rebaseruntime(repo, ui)
2183 rbsrt = rebaseruntime(repo, ui)
2189 rbsrt._prepareabortorcontinue(isabort=True)
2184 rbsrt._prepareabortorcontinue(isabort=True)
2190
2185
2191
2186
2192 def continuerebase(ui, repo):
2187 def continuerebase(ui, repo):
2193 with repo.wlock(), repo.lock():
2188 with repo.wlock(), repo.lock():
2194 rbsrt = rebaseruntime(repo, ui)
2189 rbsrt = rebaseruntime(repo, ui)
2195 ms = mergemod.mergestate.read(repo)
2190 ms = mergemod.mergestate.read(repo)
2196 mergeutil.checkunresolved(ms)
2191 mergeutil.checkunresolved(ms)
2197 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2192 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2198 if retcode is not None:
2193 if retcode is not None:
2199 return retcode
2194 return retcode
2200 rbsrt._performrebase(None)
2195 rbsrt._performrebase(None)
2201 rbsrt._finishrebase()
2196 rbsrt._finishrebase()
2202
2197
2203
2198
2204 def summaryhook(ui, repo):
2199 def summaryhook(ui, repo):
2205 if not repo.vfs.exists(b'rebasestate'):
2200 if not repo.vfs.exists(b'rebasestate'):
2206 return
2201 return
2207 try:
2202 try:
2208 rbsrt = rebaseruntime(repo, ui, {})
2203 rbsrt = rebaseruntime(repo, ui, {})
2209 rbsrt.restorestatus()
2204 rbsrt.restorestatus()
2210 state = rbsrt.state
2205 state = rbsrt.state
2211 except error.RepoLookupError:
2206 except error.RepoLookupError:
2212 # i18n: column positioning for "hg summary"
2207 # i18n: column positioning for "hg summary"
2213 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2208 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2214 ui.write(msg)
2209 ui.write(msg)
2215 return
2210 return
2216 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2211 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2217 # i18n: column positioning for "hg summary"
2212 # i18n: column positioning for "hg summary"
2218 ui.write(
2213 ui.write(
2219 _(b'rebase: %s, %s (rebase --continue)\n')
2214 _(b'rebase: %s, %s (rebase --continue)\n')
2220 % (
2215 % (
2221 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2216 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2222 ui.label(_(b'%d remaining'), b'rebase.remaining')
2217 ui.label(_(b'%d remaining'), b'rebase.remaining')
2223 % (len(state) - numrebased),
2218 % (len(state) - numrebased),
2224 )
2219 )
2225 )
2220 )
2226
2221
2227
2222
2228 def uisetup(ui):
2223 def uisetup(ui):
2229 # Replace pull with a decorator to provide --rebase option
2224 # Replace pull with a decorator to provide --rebase option
2230 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2225 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2231 entry[1].append(
2226 entry[1].append(
2232 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2227 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2233 )
2228 )
2234 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2229 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2235 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2230 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2236 statemod.addunfinished(
2231 statemod.addunfinished(
2237 b'rebase',
2232 b'rebase',
2238 fname=b'rebasestate',
2233 fname=b'rebasestate',
2239 stopflag=True,
2234 stopflag=True,
2240 continueflag=True,
2235 continueflag=True,
2241 abortfunc=abortrebase,
2236 abortfunc=abortrebase,
2242 continuefunc=continuerebase,
2237 continuefunc=continuerebase,
2243 )
2238 )
@@ -1,1182 +1,1174 b''
1 # shelve.py - save/restore working directory state
1 # shelve.py - save/restore working directory state
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """save and restore changes to the working directory
8 """save and restore changes to the working directory
9
9
10 The "hg shelve" command saves changes made to the working directory
10 The "hg shelve" command saves changes made to the working directory
11 and reverts those changes, resetting the working directory to a clean
11 and reverts those changes, resetting the working directory to a clean
12 state.
12 state.
13
13
14 Later on, the "hg unshelve" command restores the changes saved by "hg
14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 shelve". Changes can be restored even after updating to a different
15 shelve". Changes can be restored even after updating to a different
16 parent, in which case Mercurial's merge machinery will resolve any
16 parent, in which case Mercurial's merge machinery will resolve any
17 conflicts if necessary.
17 conflicts if necessary.
18
18
19 You can have more than one shelved change outstanding at a time; each
19 You can have more than one shelved change outstanding at a time; each
20 shelved change has a distinct name. For details, see the help for "hg
20 shelved change has a distinct name. For details, see the help for "hg
21 shelve".
21 shelve".
22 """
22 """
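To make the docstring concrete, a typical round trip with these commands might look like the lines below; the shelve name and the update target are placeholders, and command output is omitted:

$ hg shelve --name my-wip     # save and revert the working directory changes
$ hg update REV               # move to some other parent (REV is a placeholder)
$ hg unshelve my-wip          # restore the shelved changes, merging if needed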
23 from __future__ import absolute_import
23 from __future__ import absolute_import
24
24
25 import collections
25 import collections
26 import errno
26 import errno
27 import itertools
27 import itertools
28 import stat
28 import stat
29
29
30 from .i18n import _
30 from .i18n import _
31 from .pycompat import open
31 from .pycompat import open
32 from . import (
32 from . import (
33 bookmarks,
33 bookmarks,
34 bundle2,
34 bundle2,
35 bundlerepo,
35 bundlerepo,
36 changegroup,
36 changegroup,
37 cmdutil,
37 cmdutil,
38 discovery,
38 discovery,
39 error,
39 error,
40 exchange,
40 exchange,
41 hg,
41 hg,
42 lock as lockmod,
42 lock as lockmod,
43 mdiff,
43 mdiff,
44 merge,
44 merge,
45 node as nodemod,
45 node as nodemod,
46 patch,
46 patch,
47 phases,
47 phases,
48 pycompat,
48 pycompat,
49 repair,
49 repair,
50 scmutil,
50 scmutil,
51 templatefilters,
51 templatefilters,
52 util,
52 util,
53 vfs as vfsmod,
53 vfs as vfsmod,
54 )
54 )
55 from .utils import (
55 from .utils import (
56 dateutil,
56 dateutil,
57 stringutil,
57 stringutil,
58 )
58 )
59
59
60 backupdir = b'shelve-backup'
60 backupdir = b'shelve-backup'
61 shelvedir = b'shelved'
61 shelvedir = b'shelved'
62 shelvefileextensions = [b'hg', b'patch', b'shelve']
62 shelvefileextensions = [b'hg', b'patch', b'shelve']
63 # universal extension is present in all types of shelves
63 # universal extension is present in all types of shelves
64 patchextension = b'patch'
64 patchextension = b'patch'
65
65
66 # we never need the user, so we use a
66 # we never need the user, so we use a
67 # generic user for all shelve operations
67 # generic user for all shelve operations
68 shelveuser = b'shelve@localhost'
68 shelveuser = b'shelve@localhost'
69
69
70
70
71 class shelvedfile(object):
71 class shelvedfile(object):
72 """Helper for the file storing a single shelve
72 """Helper for the file storing a single shelve
73
73
74 Handles common functions on shelve files (.hg/.patch) using
74 Handles common functions on shelve files (.hg/.patch) using
75 the vfs layer"""
75 the vfs layer"""
76
76
77 def __init__(self, repo, name, filetype=None):
77 def __init__(self, repo, name, filetype=None):
78 self.repo = repo
78 self.repo = repo
79 self.name = name
79 self.name = name
80 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
80 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
81 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
81 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
82 self.ui = self.repo.ui
82 self.ui = self.repo.ui
83 if filetype:
83 if filetype:
84 self.fname = name + b'.' + filetype
84 self.fname = name + b'.' + filetype
85 else:
85 else:
86 self.fname = name
86 self.fname = name
87
87
88 def exists(self):
88 def exists(self):
89 return self.vfs.exists(self.fname)
89 return self.vfs.exists(self.fname)
90
90
91 def filename(self):
91 def filename(self):
92 return self.vfs.join(self.fname)
92 return self.vfs.join(self.fname)
93
93
94 def backupfilename(self):
94 def backupfilename(self):
95 def gennames(base):
95 def gennames(base):
96 yield base
96 yield base
97 base, ext = base.rsplit(b'.', 1)
97 base, ext = base.rsplit(b'.', 1)
98 for i in itertools.count(1):
98 for i in itertools.count(1):
99 yield b'%s-%d.%s' % (base, i, ext)
99 yield b'%s-%d.%s' % (base, i, ext)
100
100
101 name = self.backupvfs.join(self.fname)
101 name = self.backupvfs.join(self.fname)
102 for n in gennames(name):
102 for n in gennames(name):
103 if not self.backupvfs.exists(n):
103 if not self.backupvfs.exists(n):
104 return n
104 return n
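The gennames helper above probes for an unused backup name by inserting a counter before the extension. A standalone sketch of that naming scheme (plain strings, no vfs involved; candidate_names is a hypothetical name) looks like this:

import itertools

def candidate_names(base):
    # try the name as-is first, then base-1.ext, base-2.ext, ...
    yield base
    stem, ext = base.rsplit('.', 1)
    for i in itertools.count(1):
        yield '%s-%d.%s' % (stem, i, ext)

gen = candidate_names('default.patch')
print([next(gen) for _ in range(3)])
# ['default.patch', 'default-1.patch', 'default-2.patch']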
105
105
106 def movetobackup(self):
106 def movetobackup(self):
107 if not self.backupvfs.isdir():
107 if not self.backupvfs.isdir():
108 self.backupvfs.makedir()
108 self.backupvfs.makedir()
109 util.rename(self.filename(), self.backupfilename())
109 util.rename(self.filename(), self.backupfilename())
110
110
111 def stat(self):
111 def stat(self):
112 return self.vfs.stat(self.fname)
112 return self.vfs.stat(self.fname)
113
113
114 def opener(self, mode=b'rb'):
114 def opener(self, mode=b'rb'):
115 try:
115 try:
116 return self.vfs(self.fname, mode)
116 return self.vfs(self.fname, mode)
117 except IOError as err:
117 except IOError as err:
118 if err.errno != errno.ENOENT:
118 if err.errno != errno.ENOENT:
119 raise
119 raise
120 raise error.Abort(_(b"shelved change '%s' not found") % self.name)
120 raise error.Abort(_(b"shelved change '%s' not found") % self.name)
121
121
122 def applybundle(self, tr):
122 def applybundle(self, tr):
123 fp = self.opener()
123 fp = self.opener()
124 try:
124 try:
125 targetphase = phases.internal
125 targetphase = phases.internal
126 if not phases.supportinternal(self.repo):
126 if not phases.supportinternal(self.repo):
127 targetphase = phases.secret
127 targetphase = phases.secret
128 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
128 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
129 pretip = self.repo[b'tip']
129 pretip = self.repo[b'tip']
130 bundle2.applybundle(
130 bundle2.applybundle(
131 self.repo,
131 self.repo,
132 gen,
132 gen,
133 tr,
133 tr,
134 source=b'unshelve',
134 source=b'unshelve',
135 url=b'bundle:' + self.vfs.join(self.fname),
135 url=b'bundle:' + self.vfs.join(self.fname),
136 targetphase=targetphase,
136 targetphase=targetphase,
137 )
137 )
138 shelvectx = self.repo[b'tip']
138 shelvectx = self.repo[b'tip']
139 if pretip == shelvectx:
139 if pretip == shelvectx:
140 shelverev = tr.changes[b'revduplicates'][-1]
140 shelverev = tr.changes[b'revduplicates'][-1]
141 shelvectx = self.repo[shelverev]
141 shelvectx = self.repo[shelverev]
142 return shelvectx
142 return shelvectx
143 finally:
143 finally:
144 fp.close()
144 fp.close()
145
145
146 def bundlerepo(self):
146 def bundlerepo(self):
147 path = self.vfs.join(self.fname)
147 path = self.vfs.join(self.fname)
148 return bundlerepo.instance(
148 return bundlerepo.instance(
149 self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path), False
149 self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path), False
150 )
150 )
151
151
152 def writebundle(self, bases, node):
152 def writebundle(self, bases, node):
153 cgversion = changegroup.safeversion(self.repo)
153 cgversion = changegroup.safeversion(self.repo)
154 if cgversion == b'01':
154 if cgversion == b'01':
155 btype = b'HG10BZ'
155 btype = b'HG10BZ'
156 compression = None
156 compression = None
157 else:
157 else:
158 btype = b'HG20'
158 btype = b'HG20'
159 compression = b'BZ'
159 compression = b'BZ'
160
160
161 repo = self.repo.unfiltered()
161 repo = self.repo.unfiltered()
162
162
163 outgoing = discovery.outgoing(
163 outgoing = discovery.outgoing(
164 repo, missingroots=bases, missingheads=[node]
164 repo, missingroots=bases, missingheads=[node]
165 )
165 )
166 cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
166 cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
167
167
168 bundle2.writebundle(
168 bundle2.writebundle(
169 self.ui, cg, self.fname, btype, self.vfs, compression=compression
169 self.ui, cg, self.fname, btype, self.vfs, compression=compression
170 )
170 )
171
171
172 def writeinfo(self, info):
172 def writeinfo(self, info):
173 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
173 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
174
174
175 def readinfo(self):
175 def readinfo(self):
176 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
176 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
177
177
178
178
179 class shelvedstate(object):
179 class shelvedstate(object):
180 """Handle persistence during unshelving operations.
180 """Handle persistence during unshelving operations.
181
181
182 Handles saving and restoring a shelved state. Ensures that different
182 Handles saving and restoring a shelved state. Ensures that different
183 versions of a shelved state are possible and handles them appropriately.
183 versions of a shelved state are possible and handles them appropriately.
184 """
184 """
185
185
186 _version = 2
186 _version = 2
187 _filename = b'shelvedstate'
187 _filename = b'shelvedstate'
188 _keep = b'keep'
188 _keep = b'keep'
189 _nokeep = b'nokeep'
189 _nokeep = b'nokeep'
190 # colon is essential to differentiate from a real bookmark name
190 # colon is essential to differentiate from a real bookmark name
191 _noactivebook = b':no-active-bookmark'
191 _noactivebook = b':no-active-bookmark'
192 _interactive = b'interactive'
192 _interactive = b'interactive'
193
193
194 @classmethod
194 @classmethod
195 def _verifyandtransform(cls, d):
195 def _verifyandtransform(cls, d):
196 """Some basic shelvestate syntactic verification and transformation"""
196 """Some basic shelvestate syntactic verification and transformation"""
197 try:
197 try:
198 d[b'originalwctx'] = nodemod.bin(d[b'originalwctx'])
198 d[b'originalwctx'] = nodemod.bin(d[b'originalwctx'])
199 d[b'pendingctx'] = nodemod.bin(d[b'pendingctx'])
199 d[b'pendingctx'] = nodemod.bin(d[b'pendingctx'])
200 d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')]
200 d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')]
201 d[b'nodestoremove'] = [
201 d[b'nodestoremove'] = [
202 nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ')
202 nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ')
203 ]
203 ]
204 except (ValueError, TypeError, KeyError) as err:
204 except (ValueError, TypeError, KeyError) as err:
205 raise error.CorruptedState(pycompat.bytestr(err))
205 raise error.CorruptedState(pycompat.bytestr(err))
206
206
207 @classmethod
207 @classmethod
208 def _getversion(cls, repo):
208 def _getversion(cls, repo):
209 """Read version information from shelvestate file"""
209 """Read version information from shelvestate file"""
210 fp = repo.vfs(cls._filename)
210 fp = repo.vfs(cls._filename)
211 try:
211 try:
212 version = int(fp.readline().strip())
212 version = int(fp.readline().strip())
213 except ValueError as err:
213 except ValueError as err:
214 raise error.CorruptedState(pycompat.bytestr(err))
214 raise error.CorruptedState(pycompat.bytestr(err))
215 finally:
215 finally:
216 fp.close()
216 fp.close()
217 return version
217 return version
218
218
219 @classmethod
219 @classmethod
220 def _readold(cls, repo):
220 def _readold(cls, repo):
221 """Read the old position-based version of a shelvestate file"""
221 """Read the old position-based version of a shelvestate file"""
222 # Order is important, because the old shelvestate file uses it
222 # Order is important, because the old shelvestate file uses it
223 # to determine values of fields (e.g. name is on the second line,
223 # to determine values of fields (e.g. name is on the second line,
224 # originalwctx is on the third and so forth). Please do not change.
224 # originalwctx is on the third and so forth). Please do not change.
225 keys = [
225 keys = [
226 b'version',
226 b'version',
227 b'name',
227 b'name',
228 b'originalwctx',
228 b'originalwctx',
229 b'pendingctx',
229 b'pendingctx',
230 b'parents',
230 b'parents',
231 b'nodestoremove',
231 b'nodestoremove',
232 b'branchtorestore',
232 b'branchtorestore',
233 b'keep',
233 b'keep',
234 b'activebook',
234 b'activebook',
235 ]
235 ]
236 # this is executed only rarely, so it is not a big deal
236 # this is executed only rarely, so it is not a big deal
237 # that we open this file twice
237 # that we open this file twice
238 fp = repo.vfs(cls._filename)
238 fp = repo.vfs(cls._filename)
239 d = {}
239 d = {}
240 try:
240 try:
241 for key in keys:
241 for key in keys:
242 d[key] = fp.readline().strip()
242 d[key] = fp.readline().strip()
243 finally:
243 finally:
244 fp.close()
244 fp.close()
245 return d
245 return d
246
246
247 @classmethod
247 @classmethod
248 def load(cls, repo):
248 def load(cls, repo):
249 version = cls._getversion(repo)
249 version = cls._getversion(repo)
250 if version < cls._version:
250 if version < cls._version:
251 d = cls._readold(repo)
251 d = cls._readold(repo)
252 elif version == cls._version:
252 elif version == cls._version:
253 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
253 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
254 firstlinenonkeyval=True
254 firstlinenonkeyval=True
255 )
255 )
256 else:
256 else:
257 raise error.Abort(
257 raise error.Abort(
258 _(
258 _(
259 b'this version of shelve is incompatible '
259 b'this version of shelve is incompatible '
260 b'with the version used in this repo'
260 b'with the version used in this repo'
261 )
261 )
262 )
262 )
263
263
264 cls._verifyandtransform(d)
264 cls._verifyandtransform(d)
265 try:
265 try:
266 obj = cls()
266 obj = cls()
267 obj.name = d[b'name']
267 obj.name = d[b'name']
268 obj.wctx = repo[d[b'originalwctx']]
268 obj.wctx = repo[d[b'originalwctx']]
269 obj.pendingctx = repo[d[b'pendingctx']]
269 obj.pendingctx = repo[d[b'pendingctx']]
270 obj.parents = d[b'parents']
270 obj.parents = d[b'parents']
271 obj.nodestoremove = d[b'nodestoremove']
271 obj.nodestoremove = d[b'nodestoremove']
272 obj.branchtorestore = d.get(b'branchtorestore', b'')
272 obj.branchtorestore = d.get(b'branchtorestore', b'')
273 obj.keep = d.get(b'keep') == cls._keep
273 obj.keep = d.get(b'keep') == cls._keep
274 obj.activebookmark = b''
274 obj.activebookmark = b''
275 if d.get(b'activebook', b'') != cls._noactivebook:
275 if d.get(b'activebook', b'') != cls._noactivebook:
276 obj.activebookmark = d.get(b'activebook', b'')
276 obj.activebookmark = d.get(b'activebook', b'')
277 obj.interactive = d.get(b'interactive') == cls._interactive
277 obj.interactive = d.get(b'interactive') == cls._interactive
278 except (error.RepoLookupError, KeyError) as err:
278 except (error.RepoLookupError, KeyError) as err:
279 raise error.CorruptedState(pycompat.bytestr(err))
279 raise error.CorruptedState(pycompat.bytestr(err))
280
280
281 return obj
281 return obj
282
282
283 @classmethod
283 @classmethod
284 def save(
284 def save(
285 cls,
285 cls,
286 repo,
286 repo,
287 name,
287 name,
288 originalwctx,
288 originalwctx,
289 pendingctx,
289 pendingctx,
290 nodestoremove,
290 nodestoremove,
291 branchtorestore,
291 branchtorestore,
292 keep=False,
292 keep=False,
293 activebook=b'',
293 activebook=b'',
294 interactive=False,
294 interactive=False,
295 ):
295 ):
296 info = {
296 info = {
297 b"name": name,
297 b"name": name,
298 b"originalwctx": nodemod.hex(originalwctx.node()),
298 b"originalwctx": nodemod.hex(originalwctx.node()),
299 b"pendingctx": nodemod.hex(pendingctx.node()),
299 b"pendingctx": nodemod.hex(pendingctx.node()),
300 b"parents": b' '.join(
300 b"parents": b' '.join(
301 [nodemod.hex(p) for p in repo.dirstate.parents()]
301 [nodemod.hex(p) for p in repo.dirstate.parents()]
302 ),
302 ),
303 b"nodestoremove": b' '.join(
303 b"nodestoremove": b' '.join(
304 [nodemod.hex(n) for n in nodestoremove]
304 [nodemod.hex(n) for n in nodestoremove]
305 ),
305 ),
306 b"branchtorestore": branchtorestore,
306 b"branchtorestore": branchtorestore,
307 b"keep": cls._keep if keep else cls._nokeep,
307 b"keep": cls._keep if keep else cls._nokeep,
308 b"activebook": activebook or cls._noactivebook,
308 b"activebook": activebook or cls._noactivebook,
309 }
309 }
310 if interactive:
310 if interactive:
311 info[b'interactive'] = cls._interactive
311 info[b'interactive'] = cls._interactive
312 scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
312 scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
313 info, firstline=(b"%d" % cls._version)
313 info, firstline=(b"%d" % cls._version)
314 )
314 )
315
315
316 @classmethod
316 @classmethod
317 def clear(cls, repo):
317 def clear(cls, repo):
318 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
318 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
319
319
320
320
321 def cleanupoldbackups(repo):
321 def cleanupoldbackups(repo):
322 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
322 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
323 maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
323 maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
324 hgfiles = [f for f in vfs.listdir() if f.endswith(b'.' + patchextension)]
324 hgfiles = [f for f in vfs.listdir() if f.endswith(b'.' + patchextension)]
325 hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
325 hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
326 if maxbackups > 0 and maxbackups < len(hgfiles):
326 if maxbackups > 0 and maxbackups < len(hgfiles):
327 bordermtime = hgfiles[-maxbackups][0]
327 bordermtime = hgfiles[-maxbackups][0]
328 else:
328 else:
329 bordermtime = None
329 bordermtime = None
330 for mtime, f in hgfiles[: len(hgfiles) - maxbackups]:
330 for mtime, f in hgfiles[: len(hgfiles) - maxbackups]:
331 if mtime == bordermtime:
331 if mtime == bordermtime:
332 # keep it, because timestamp can't decide exact order of backups
332 # keep it, because timestamp can't decide exact order of backups
333 continue
333 continue
334 base = f[: -(1 + len(patchextension))]
334 base = f[: -(1 + len(patchextension))]
335 for ext in shelvefileextensions:
335 for ext in shelvefileextensions:
336 vfs.tryunlink(base + b'.' + ext)
336 vfs.tryunlink(base + b'.' + ext)
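The retention rule above keeps the newest maxbackups backups but refuses to delete files that share the boundary timestamp, since mtime alone cannot order them exactly. A small standalone sketch of that rule over (mtime, name) pairs (prune_backups is a hypothetical helper, not part of this module):

def prune_backups(files, maxbackups):
    # files: iterable of (mtime, name) pairs; returns names safe to delete
    files = sorted(files)
    if 0 < maxbackups < len(files):
        bordermtime = files[-maxbackups][0]
    else:
        bordermtime = None
    doomed = []
    for mtime, name in files[: len(files) - maxbackups]:
        if mtime == bordermtime:
            # a tie at the boundary: keep it, exact order is unknowable
            continue
        doomed.append(name)
    return doomed

print(prune_backups([(1, 'a'), (2, 'b'), (2, 'c'), (3, 'd')], 2))  # ['a']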
337
337
338
338
339 def _backupactivebookmark(repo):
339 def _backupactivebookmark(repo):
340 activebookmark = repo._activebookmark
340 activebookmark = repo._activebookmark
341 if activebookmark:
341 if activebookmark:
342 bookmarks.deactivate(repo)
342 bookmarks.deactivate(repo)
343 return activebookmark
343 return activebookmark
344
344
345
345
346 def _restoreactivebookmark(repo, mark):
346 def _restoreactivebookmark(repo, mark):
347 if mark:
347 if mark:
348 bookmarks.activate(repo, mark)
348 bookmarks.activate(repo, mark)
349
349
350
350
351 def _aborttransaction(repo, tr):
351 def _aborttransaction(repo, tr):
352 '''Abort current transaction for shelve/unshelve, but keep dirstate
352 '''Abort current transaction for shelve/unshelve, but keep dirstate
353 '''
353 '''
354 dirstatebackupname = b'dirstate.shelve'
354 dirstatebackupname = b'dirstate.shelve'
355 repo.dirstate.savebackup(tr, dirstatebackupname)
355 repo.dirstate.savebackup(tr, dirstatebackupname)
356 tr.abort()
356 tr.abort()
357 repo.dirstate.restorebackup(None, dirstatebackupname)
357 repo.dirstate.restorebackup(None, dirstatebackupname)
358
358
359
359
360 def getshelvename(repo, parent, opts):
360 def getshelvename(repo, parent, opts):
361 """Decide on the name this shelve is going to have"""
361 """Decide on the name this shelve is going to have"""
362
362
363 def gennames():
363 def gennames():
364 yield label
364 yield label
365 for i in itertools.count(1):
365 for i in itertools.count(1):
366 yield b'%s-%02d' % (label, i)
366 yield b'%s-%02d' % (label, i)
367
367
368 name = opts.get(b'name')
368 name = opts.get(b'name')
369 label = repo._activebookmark or parent.branch() or b'default'
369 label = repo._activebookmark or parent.branch() or b'default'
370 # slashes aren't allowed in filenames, therefore we replace them
370 # slashes aren't allowed in filenames, therefore we replace them
371 label = label.replace(b'/', b'_')
371 label = label.replace(b'/', b'_')
372 label = label.replace(b'\\', b'_')
372 label = label.replace(b'\\', b'_')
373 # filenames must not start with '.' as it should not be hidden
373 # filenames must not start with '.' as it should not be hidden
374 if label.startswith(b'.'):
374 if label.startswith(b'.'):
375 label = label.replace(b'.', b'_', 1)
375 label = label.replace(b'.', b'_', 1)
376
376
377 if name:
377 if name:
378 if shelvedfile(repo, name, patchextension).exists():
378 if shelvedfile(repo, name, patchextension).exists():
379 e = _(b"a shelved change named '%s' already exists") % name
379 e = _(b"a shelved change named '%s' already exists") % name
380 raise error.Abort(e)
380 raise error.Abort(e)
381
381
382 # ensure we are not creating a subdirectory or a hidden file
382 # ensure we are not creating a subdirectory or a hidden file
383 if b'/' in name or b'\\' in name:
383 if b'/' in name or b'\\' in name:
384 raise error.Abort(
384 raise error.Abort(
385 _(b'shelved change names can not contain slashes')
385 _(b'shelved change names can not contain slashes')
386 )
386 )
387 if name.startswith(b'.'):
387 if name.startswith(b'.'):
388 raise error.Abort(_(b"shelved change names can not start with '.'"))
388 raise error.Abort(_(b"shelved change names can not start with '.'"))
389
389
390 else:
390 else:
391 for n in gennames():
391 for n in gennames():
392 if not shelvedfile(repo, n, patchextension).exists():
392 if not shelvedfile(repo, n, patchextension).exists():
393 name = n
393 name = n
394 break
394 break
395
395
396 return name
396 return name
397
397
398
398
399 def mutableancestors(ctx):
399 def mutableancestors(ctx):
400 """return all mutable ancestors for ctx (included)
400 """return all mutable ancestors for ctx (included)
401
401
402 Much faster than the revset ancestors(ctx) & draft()"""
402 Much faster than the revset ancestors(ctx) & draft()"""
403 seen = {nodemod.nullrev}
403 seen = {nodemod.nullrev}
404 visit = collections.deque()
404 visit = collections.deque()
405 visit.append(ctx)
405 visit.append(ctx)
406 while visit:
406 while visit:
407 ctx = visit.popleft()
407 ctx = visit.popleft()
408 yield ctx.node()
408 yield ctx.node()
409 for parent in ctx.parents():
409 for parent in ctx.parents():
410 rev = parent.rev()
410 rev = parent.rev()
411 if rev not in seen:
411 if rev not in seen:
412 seen.add(rev)
412 seen.add(rev)
413 if parent.mutable():
413 if parent.mutable():
414 visit.append(parent)
414 visit.append(parent)
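The traversal above is a breadth-first walk that yields the starting node and every ancestor reachable through mutable commits only. The sketch below mirrors it with plain dicts and sets standing in for the changectx API (all names here are illustrative):

import collections

def mutable_ancestors(start, parents, mutable):
    # parents: {node: [parent nodes]}; mutable: set of nodes considered draft
    seen = {start}
    visit = collections.deque([start])
    while visit:
        node = visit.popleft()
        yield node
        for parent in parents.get(node, ()):
            if parent not in seen:
                seen.add(parent)
                if parent in mutable:
                    visit.append(parent)

parents = {'D': ['C'], 'C': ['B'], 'B': ['A']}
print(list(mutable_ancestors('D', parents, mutable={'D', 'C'})))  # ['D', 'C']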
415
415
416
416
417 def getcommitfunc(extra, interactive, editor=False):
417 def getcommitfunc(extra, interactive, editor=False):
418 def commitfunc(ui, repo, message, match, opts):
418 def commitfunc(ui, repo, message, match, opts):
419 hasmq = util.safehasattr(repo, b'mq')
419 hasmq = util.safehasattr(repo, b'mq')
420 if hasmq:
420 if hasmq:
421 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
421 saved, repo.mq.checkapplied = repo.mq.checkapplied, False
422
422
423 targetphase = phases.internal
423 targetphase = phases.internal
424 if not phases.supportinternal(repo):
424 if not phases.supportinternal(repo):
425 targetphase = phases.secret
425 targetphase = phases.secret
426 overrides = {(b'phases', b'new-commit'): targetphase}
426 overrides = {(b'phases', b'new-commit'): targetphase}
427 try:
427 try:
428 editor_ = False
428 editor_ = False
429 if editor:
429 if editor:
430 editor_ = cmdutil.getcommiteditor(
430 editor_ = cmdutil.getcommiteditor(
431 editform=b'shelve.shelve', **pycompat.strkwargs(opts)
431 editform=b'shelve.shelve', **pycompat.strkwargs(opts)
432 )
432 )
433 with repo.ui.configoverride(overrides):
433 with repo.ui.configoverride(overrides):
434 return repo.commit(
434 return repo.commit(
435 message,
435 message,
436 shelveuser,
436 shelveuser,
437 opts.get(b'date'),
437 opts.get(b'date'),
438 match,
438 match,
439 editor=editor_,
439 editor=editor_,
440 extra=extra,
440 extra=extra,
441 )
441 )
442 finally:
442 finally:
443 if hasmq:
443 if hasmq:
444 repo.mq.checkapplied = saved
444 repo.mq.checkapplied = saved
445
445
446 def interactivecommitfunc(ui, repo, *pats, **opts):
446 def interactivecommitfunc(ui, repo, *pats, **opts):
447 opts = pycompat.byteskwargs(opts)
447 opts = pycompat.byteskwargs(opts)
448 match = scmutil.match(repo[b'.'], pats, {})
448 match = scmutil.match(repo[b'.'], pats, {})
449 message = opts[b'message']
449 message = opts[b'message']
450 return commitfunc(ui, repo, message, match, opts)
450 return commitfunc(ui, repo, message, match, opts)
451
451
452 return interactivecommitfunc if interactive else commitfunc
452 return interactivecommitfunc if interactive else commitfunc
453
453
454
454
455 def _nothingtoshelvemessaging(ui, repo, pats, opts):
455 def _nothingtoshelvemessaging(ui, repo, pats, opts):
456 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
456 stat = repo.status(match=scmutil.match(repo[None], pats, opts))
457 if stat.deleted:
457 if stat.deleted:
458 ui.status(
458 ui.status(
459 _(b"nothing changed (%d missing files, see 'hg status')\n")
459 _(b"nothing changed (%d missing files, see 'hg status')\n")
460 % len(stat.deleted)
460 % len(stat.deleted)
461 )
461 )
462 else:
462 else:
463 ui.status(_(b"nothing changed\n"))
463 ui.status(_(b"nothing changed\n"))
464
464
465
465
466 def _shelvecreatedcommit(repo, node, name, match):
466 def _shelvecreatedcommit(repo, node, name, match):
467 info = {b'node': nodemod.hex(node)}
467 info = {b'node': nodemod.hex(node)}
468 shelvedfile(repo, name, b'shelve').writeinfo(info)
468 shelvedfile(repo, name, b'shelve').writeinfo(info)
469 bases = list(mutableancestors(repo[node]))
469 bases = list(mutableancestors(repo[node]))
470 shelvedfile(repo, name, b'hg').writebundle(bases, node)
470 shelvedfile(repo, name, b'hg').writebundle(bases, node)
471 with shelvedfile(repo, name, patchextension).opener(b'wb') as fp:
471 with shelvedfile(repo, name, patchextension).opener(b'wb') as fp:
472 cmdutil.exportfile(
472 cmdutil.exportfile(
473 repo, [node], fp, opts=mdiff.diffopts(git=True), match=match
473 repo, [node], fp, opts=mdiff.diffopts(git=True), match=match
474 )
474 )
475
475
476
476
477 def _includeunknownfiles(repo, pats, opts, extra):
477 def _includeunknownfiles(repo, pats, opts, extra):
478 s = repo.status(match=scmutil.match(repo[None], pats, opts), unknown=True)
478 s = repo.status(match=scmutil.match(repo[None], pats, opts), unknown=True)
479 if s.unknown:
479 if s.unknown:
480 extra[b'shelve_unknown'] = b'\0'.join(s.unknown)
480 extra[b'shelve_unknown'] = b'\0'.join(s.unknown)
481 repo[None].add(s.unknown)
481 repo[None].add(s.unknown)
482
482
483
483
484 def _finishshelve(repo, tr):
484 def _finishshelve(repo, tr):
485 if phases.supportinternal(repo):
485 if phases.supportinternal(repo):
486 tr.close()
486 tr.close()
487 else:
487 else:
488 _aborttransaction(repo, tr)
488 _aborttransaction(repo, tr)
489
489
490
490
491 def createcmd(ui, repo, pats, opts):
491 def createcmd(ui, repo, pats, opts):
492 """subcommand that creates a new shelve"""
492 """subcommand that creates a new shelve"""
493 with repo.wlock():
493 with repo.wlock():
494 cmdutil.checkunfinished(repo)
494 cmdutil.checkunfinished(repo)
495 return _docreatecmd(ui, repo, pats, opts)
495 return _docreatecmd(ui, repo, pats, opts)
496
496
497
497
def _docreatecmd(ui, repo, pats, opts):
    wctx = repo[None]
    parents = wctx.parents()
    parent = parents[0]
    origbranch = wctx.branch()

    if parent.node() != nodemod.nullid:
        desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0]
    else:
        desc = b'(changes in empty repository)'

    if not opts.get(b'message'):
        opts[b'message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction(b'shelve', report=lambda x: None)

        interactive = opts.get(b'interactive', False)
        includeunknown = opts.get(b'unknown', False) and not opts.get(
            b'addremove', False
        )

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {b'internal': b'shelve'}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo[b'.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(
                ui,
                repo,
                commitfunc,
                None,
                False,
                cmdutil.recordfilter,
                *pats,
                **pycompat.strkwargs(opts)
            )
        if not node:
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        # Create a matcher so that prefetch doesn't attempt to fetch
        # the entire repository pointlessly, and as an optimisation
        # for movedirstate, if needed.
        match = scmutil.matchfiles(repo, repo[node].files())
        _shelvecreatedcommit(repo, node, name, match)

        ui.status(_(b'shelved as %s\n') % name)
        if opts[b'keep']:
            with repo.dirstate.parentchange():
                scmutil.movedirstate(repo, parent, match)
        else:
            hg.update(repo, parent.node())
        if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo, tr)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)


def _isbareshelve(pats, opts):
    return (
        not pats
        and not opts.get(b'interactive', False)
        and not opts.get(b'include', False)
        and not opts.get(b'exclude', False)
    )


def _iswctxonnewbranch(repo):
    return repo[None].branch() != repo[b'.'].branch()


def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves"""

    with repo.wlock():
        for (name, _type) in repo.vfs.readdir(shelvedir):
            suffix = name.rsplit(b'.', 1)[-1]
            if suffix in shelvefileextensions:
                shelvedfile(repo, name).movetobackup()
            cleanupoldbackups(repo)


def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve"""
    if not pats:
        raise error.Abort(_(b'no shelved changes specified!'))
    with repo.wlock():
        for name in pats:
            try:
                for suffix in shelvefileextensions:
                    shfile = shelvedfile(repo, name, suffix)
                    # patch file is necessary, as it should
                    # be present for any kind of shelve,
                    # but the .hg file is optional as in future we
                    # will add obsolete shelve which does not create a
                    # bundle
                    if shfile.exists() or suffix == patchextension:
                        shfile.movetobackup()
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
                raise error.Abort(_(b"shelved change '%s' not found") % name)
        cleanupoldbackups(repo)


def listshelves(repo):
    """return all shelves in repo as list of (time, filename)"""
    try:
        names = repo.vfs.readdir(shelvedir)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        return []
    info = []
    for (name, _type) in names:
        pfx, sfx = name.rsplit(b'.', 1)
        if not pfx or sfx != patchextension:
            continue
        st = shelvedfile(repo, name).stat()
        info.append((st[stat.ST_MTIME], shelvedfile(repo, pfx).filename()))
    return sorted(info, reverse=True)


def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves"""
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    namelabel = b'shelve.newest'
    ui.pager(b'shelve')
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        namelabel = b'shelve.name'
        if ui.quiet:
            ui.write(b'\n')
            continue
        ui.write(b' ' * (16 - len(sname)))
        used = 16
        date = dateutil.makedate(mtime)
        age = b'(%s)' % templatefilters.age(date, abbrev=True)
        ui.write(age, label=b'shelve.age')
        ui.write(b' ' * (12 - len(age)))
        used += 12
        with open(name + b'.' + patchextension, b'rb') as fp:
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith(b'#'):
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = stringutil.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write(b'\n')
            if not (opts[b'patch'] or opts[b'stat']):
                continue
            difflines = fp.readlines()
            if opts[b'patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts[b'stat']:
                for chunk, label in patch.diffstatui(difflines, width=width):
                    ui.write(chunk, label=label)


def patchcmds(ui, repo, pats, opts):
    """subcommand that displays shelves"""
    if len(pats) == 0:
        shelves = listshelves(repo)
        if not shelves:
            raise error.Abort(_(b"there are no shelves to show"))
        mtime, name = shelves[0]
        sname = util.split(name)[1]
        pats = [sname]

    for shelfname in pats:
        if not shelvedfile(repo, shelfname, patchextension).exists():
            raise error.Abort(_(b"cannot find shelf %s") % shelfname)

    listcmd(ui, repo, pats, opts)


def checkparents(repo, state):
    """check parent while resuming an unshelve"""
    if state.parents != repo.dirstate.parents():
        raise error.Abort(
            _(b'working directory parents do not match unshelve state')
        )


def _loadshelvedstate(ui, repo, opts):
    try:
        state = shelvedstate.load(repo)
        if opts.get(b'keep') is None:
            opts[b'keep'] = state.keep
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        cmdutil.wrongtooltocontinue(repo, _(b'unshelve'))
    except error.CorruptedState as err:
        ui.debug(pycompat.bytestr(err) + b'\n')
        if opts.get(b'continue'):
            msg = _(b'corrupted shelved state file')
            hint = _(
                b'please run hg unshelve --abort to abort unshelve '
                b'operation'
            )
            raise error.Abort(msg, hint=hint)
        elif opts.get(b'abort'):
            shelvedstate.clear(repo)
            raise error.Abort(
                _(
                    b'could not read shelved state file, your '
                    b'working copy may be in an unexpected state\n'
                    b'please update to some commit\n'
                )
            )
    return state


def unshelveabort(ui, repo, state):
    """subcommand that aborts an in-progress unshelve"""
    with repo.lock():
        try:
            checkparents(repo, state)

            merge.clean_update(state.pendingctx)
            if state.activebookmark and state.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, state.activebookmark)
            mergefiles(ui, repo, state.wctx, state.pendingctx)
            if not phases.supportinternal(repo):
                repair.strip(
                    ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
                )
        finally:
            shelvedstate.clear(repo)
            ui.warn(_(b"unshelve of '%s' aborted\n") % state.name)


def hgabortunshelve(ui, repo):
    """logic to abort unshelve using 'hg abort'"""
    with repo.wlock():
        state = _loadshelvedstate(ui, repo, {b'abort': True})
        return unshelveabort(ui, repo, state)


def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate."""
    with ui.configoverride({(b'ui', b'quiet'): True}):
        hg.update(repo, wctx.node())
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents())
        ui.popbuffer()


def restorebranch(ui, repo, branchtorestore):
    if branchtorestore and branchtorestore != repo.dirstate.branch():
        repo.dirstate.setbranch(branchtorestore)
        ui.status(
            _(b'marked working directory as branch %s\n') % branchtorestore
        )


def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve"""
    if not opts.get(b'keep'):
        for filetype in shelvefileextensions:
            shfile = shelvedfile(repo, name, filetype)
            if shfile.exists():
                shfile.movetobackup()
        cleanupoldbackups(repo)


def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve"""
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    interactive = state.interactive
    basename = state.name
    with repo.lock():
        checkparents(repo, state)
        ms = merge.mergestate.read(repo)
        if list(ms.unresolved()):
            raise error.Abort(
                _(b"unresolved conflicts, can't continue"),
                hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
            )

        shelvectx = repo[state.parents[1]]
        pendingctx = state.pendingctx

        with repo.dirstate.parentchange():
            repo.setparents(state.pendingctx.node(), nodemod.nullid)
            repo.dirstate.write(repo.currenttransaction())

        targetphase = phases.internal
        if not phases.supportinternal(repo):
            targetphase = phases.secret
        overrides = {(b'phases', b'new-commit'): targetphase}
        with repo.ui.configoverride(overrides, b'unshelve'):
            with repo.dirstate.parentchange():
                repo.setparents(state.parents[0], nodemod.nullid)
                newnode, ispartialunshelve = _createunshelvectx(
                    ui, repo, shelvectx, basename, interactive, opts
                )

        if newnode is None:
            # If it ended up being a no-op commit, then the normal
            # merge state clean-up path doesn't happen, so do it
            # here. Fix issue5494
            merge.mergestate.clean(repo)
            shelvectx = state.pendingctx
            msg = _(
                b'note: unshelved changes already existed '
                b'in the working copy\n'
            )
            ui.status(msg)
        else:
            # only strip the shelvectx if we produced one
            state.nodestoremove.append(newnode)
            shelvectx = repo[newnode]

        hg.updaterepo(repo, pendingctx.node(), overwrite=False)
        mergefiles(ui, repo, state.wctx, shelvectx)
        restorebranch(ui, repo, state.branchtorestore)

        if not phases.supportinternal(repo):
            repair.strip(
                ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
            )
        shelvedstate.clear(repo)
        if not ispartialunshelve:
            unshelvecleanup(ui, repo, state.name, opts)
        _restoreactivebookmark(repo, state.activebookmark)
        ui.status(_(b"unshelve of '%s' complete\n") % state.name)


def hgcontinueunshelve(ui, repo):
    """logic to resume unshelve using 'hg continue'"""
    with repo.wlock():
        state = _loadshelvedstate(ui, repo, {b'continue': True})
        return unshelvecontinue(ui, repo, state, {b'keep': state.keep})


def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
    """Temporarily commit working copy changes before moving unshelve commit"""
    # Store pending changes in a commit and remember added in case a shelve
    # contains unknown files that are part of the pending change
    s = repo.status()
    addedbefore = frozenset(s.added)
    if not (s.modified or s.added or s.removed):
        return tmpwctx, addedbefore
    ui.status(
        _(
            b"temporarily committing pending changes "
            b"(restore with 'hg unshelve --abort')\n"
        )
    )
    extra = {b'internal': b'shelve'}
    commitfunc = getcommitfunc(extra=extra, interactive=False, editor=False)
    tempopts = {}
    tempopts[b'message'] = b"pending changes temporary commit"
    tempopts[b'date'] = opts.get(b'date')
    with ui.configoverride({(b'ui', b'quiet'): True}):
        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
    tmpwctx = repo[node]
    return tmpwctx, addedbefore


def _unshelverestorecommit(ui, repo, tr, basename):
    """Recreate commit in the repository during the unshelve"""
    repo = repo.unfiltered()
    node = None
    if shelvedfile(repo, basename, b'shelve').exists():
        node = shelvedfile(repo, basename, b'shelve').readinfo()[b'node']
    if node is None or node not in repo:
        with ui.configoverride({(b'ui', b'quiet'): True}):
            shelvectx = shelvedfile(repo, basename, b'hg').applybundle(tr)
        # We might not strip the unbundled changeset, so we should keep track of
        # the unshelve node in case we need to reuse it (eg: unshelve --keep)
        if node is None:
            info = {b'node': nodemod.hex(shelvectx.node())}
            shelvedfile(repo, basename, b'shelve').writeinfo(info)
    else:
        shelvectx = repo[node]

    return repo, shelvectx


def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts):
    """Handles the creation of the unshelve commit and updates the shelve if it
    was partially unshelved.

    If interactive is:

    * False: Commits all the changes in the working directory.
    * True: Prompts the user to select changes to unshelve and commit them.
      Update the shelve with remaining changes.

    Returns the node of the new commit formed and a bool indicating whether
    the shelve was partially unshelved. Creates a commit ctx to unshelve
    interactively or non-interactively.

    In interactive mode the user might want to unshelve only some of the
    changes from the stored shelve. In that case we create two commits: one
    with the changes requested for unshelving now, and another that re-shelves
    the remaining changes for later.

    Here, we return both the newnode which is created interactively and a
    bool to know whether the shelve is partly done or completely done.
    """
    opts[b'message'] = shelvectx.description()
    opts[b'interactive-unshelve'] = True
    pats = []
    if not interactive:
        newnode = repo.commit(
            text=shelvectx.description(),
            extra=shelvectx.extra(),
            user=shelvectx.user(),
            date=shelvectx.date(),
        )
        return newnode, False

    commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, editor=True)
    newnode = cmdutil.dorecord(
        ui,
        repo,
        commitfunc,
        None,
        False,
        cmdutil.recordfilter,
        *pats,
        **pycompat.strkwargs(opts)
    )
    snode = repo.commit(
        text=shelvectx.description(),
        extra=shelvectx.extra(),
        user=shelvectx.user(),
    )
    if snode:
        m = scmutil.matchfiles(repo, repo[snode].files())
        _shelvecreatedcommit(repo, snode, basename, m)

    return newnode, bool(snode)


def _rebaserestoredcommit(
    ui,
    repo,
    opts,
    tr,
    oldtiprev,
    basename,
    pctx,
    tmpwctx,
    shelvectx,
    branchtorestore,
    activebookmark,
):
    """Rebase restored commit from its original location to a destination"""
    # If the shelve is not immediately on top of the commit
    # we'll be merging with, rebase it to be on top.
    interactive = opts.get(b'interactive')
    if tmpwctx.node() == shelvectx.p1().node() and not interactive:
        # We won't skip in interactive mode because the user might want to
        # unshelve certain changes only.
        return shelvectx, False

    overrides = {
        (b'ui', b'forcemerge'): opts.get(b'tool', b''),
        (b'phases', b'new-commit'): phases.secret,
    }
    with repo.ui.configoverride(overrides, b'unshelve'):
        ui.status(_(b'rebasing shelved changes\n'))
        stats = merge.graft(
            repo,
            shelvectx,
            labels=[b'working-copy', b'shelve'],
            keepconflictparent=True,
        )
        if stats.unresolvedcount:
            tr.close()

            nodestoremove = [
                repo.changelog.node(rev)
                for rev in pycompat.xrange(oldtiprev, len(repo))
            ]
            shelvedstate.save(
                repo,
                basename,
                pctx,
                tmpwctx,
                nodestoremove,
                branchtorestore,
                opts.get(b'keep'),
                activebookmark,
                interactive,
            )
            raise error.InterventionRequired(
                _(
                    b"unresolved conflicts (see 'hg resolve', then "
                    b"'hg unshelve --continue')"
                )
            )

        with repo.dirstate.parentchange():
            repo.setparents(tmpwctx.node(), nodemod.nullid)
            newnode, ispartialunshelve = _createunshelvectx(
                ui, repo, shelvectx, basename, interactive, opts
            )

        if newnode is None:
            # If it ended up being a no-op commit, then the normal
            # merge state clean-up path doesn't happen, so do it
            # here. Fix issue5494
            merge.mergestate.clean(repo)
            shelvectx = tmpwctx
            msg = _(
                b'note: unshelved changes already existed '
                b'in the working copy\n'
            )
            ui.status(msg)
        else:
            shelvectx = repo[newnode]
            hg.updaterepo(repo, tmpwctx.node(), False)

    return shelvectx, ispartialunshelve


def _forgetunknownfiles(repo, shelvectx, addedbefore):
    # Forget any files that were unknown before the shelve, unknown before
    # unshelve started, but are now added.
    shelveunknown = shelvectx.extra().get(b'shelve_unknown')
    if not shelveunknown:
        return
    shelveunknown = frozenset(shelveunknown.split(b'\0'))
    addedafter = frozenset(repo.status().added)
    toforget = (addedafter & shelveunknown) - addedbefore
    repo[None].forget(toforget)


def _finishunshelve(repo, oldtiprev, tr, activebookmark):
    _restoreactivebookmark(repo, activebookmark)
    # The transaction aborting will strip all the commits for us,
    # but it doesn't update the inmemory structures, so addchangegroup
    # hooks still fire and try to operate on the missing commits.
    # Clean up manually to prevent this.
    repo.unfiltered().changelog.strip(oldtiprev, tr)
    _aborttransaction(repo, tr)


def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
    """Check potential problems which may result from working
    copy having untracked changes."""
    wcdeleted = set(repo.status().deleted)
    shelvetouched = set(shelvectx.files())
    intersection = wcdeleted.intersection(shelvetouched)
    if intersection:
        m = _(b"shelved change touches missing files")
        hint = _(b"run hg status to see which files are missing")
        raise error.Abort(m, hint=hint)


def dounshelve(ui, repo, *shelved, **opts):
    opts = pycompat.byteskwargs(opts)
    abortf = opts.get(b'abort')
    continuef = opts.get(b'continue')
    interactive = opts.get(b'interactive')
    if not abortf and not continuef:
        cmdutil.checkunfinished(repo)
    shelved = list(shelved)
    if opts.get(b"name"):
        shelved.append(opts[b"name"])

    if interactive and opts.get(b'keep'):
        raise error.Abort(_(b'--keep on --interactive is not yet supported'))
    if abortf or continuef:
        if abortf and continuef:
            raise error.Abort(_(b'cannot use both abort and continue'))
        if shelved:
            raise error.Abort(
                _(
                    b'cannot combine abort/continue with '
                    b'naming a shelved change'
                )
            )
        if abortf and opts.get(b'tool', False):
            ui.warn(_(b'tool option will be ignored\n'))

        state = _loadshelvedstate(ui, repo, opts)
        if abortf:
            return unshelveabort(ui, repo, state)
        elif continuef and interactive:
            raise error.Abort(_(b'cannot use both continue and interactive'))
        elif continuef:
            return unshelvecontinue(ui, repo, state, opts)
    elif len(shelved) > 1:
        raise error.Abort(_(b'can only unshelve one change at a time'))
    elif not shelved:
        shelved = listshelves(repo)
        if not shelved:
            raise error.Abort(_(b'no shelved changes to apply!'))
        basename = util.split(shelved[0][1])[1]
        ui.status(_(b"unshelving change '%s'\n") % basename)
    else:
        basename = shelved[0]

    if not shelvedfile(repo, basename, patchextension).exists():
        raise error.Abort(_(b"shelved change '%s' not found") % basename)

    repo = repo.unfiltered()
    lock = tr = None
    try:
        lock = repo.lock()
        tr = repo.transaction(b'unshelve', report=lambda x: None)
        oldtiprev = len(repo)

        pctx = repo[b'.']
        tmpwctx = pctx
        # The goal is to have a commit structure like so:
        # ...-> pctx -> tmpwctx -> shelvectx
        # where tmpwctx is an optional commit with the user's pending changes
        # and shelvectx is the unshelved changes. Then we merge it all down
        # to the original pctx.

        activebookmark = _backupactivebookmark(repo)
        tmpwctx, addedbefore = _commitworkingcopychanges(
            ui, repo, opts, tmpwctx
        )
        repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
        _checkunshelveuntrackedproblems(ui, repo, shelvectx)
        branchtorestore = b''
        if shelvectx.branch() != shelvectx.p1().branch():
            branchtorestore = shelvectx.branch()

        shelvectx, ispartialunshelve = _rebaserestoredcommit(
            ui,
            repo,
            opts,
            tr,
            oldtiprev,
            basename,
            pctx,
            tmpwctx,
            shelvectx,
            branchtorestore,
            activebookmark,
        )
        overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
        with ui.configoverride(overrides, b'unshelve'):
            mergefiles(ui, repo, pctx, shelvectx)
        restorebranch(ui, repo, branchtorestore)
        shelvedstate.clear(repo)
        _finishunshelve(repo, oldtiprev, tr, activebookmark)
        _forgetunknownfiles(repo, shelvectx, addedbefore)
        if not ispartialunshelve:
            unshelvecleanup(ui, repo, basename, opts)
    finally:
        if tr:
            tr.release()
        lockmod.release(lock)