##// END OF EJS Templates
error: normalize "unresolved conflicts" error messages with a custom class...
Daniel Ploch -
r45711:e429e7c8 default
parent child Browse files
Show More
@@ -1,2262 +1,2257
1 # rebase.py - rebasing feature for mercurial
1 # rebase.py - rebasing feature for mercurial
2 #
2 #
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to move sets of revisions to a different ancestor
8 '''command to move sets of revisions to a different ancestor
9
9
10 This extension lets you rebase changesets in an existing Mercurial
10 This extension lets you rebase changesets in an existing Mercurial
11 repository.
11 repository.
12
12
13 For more information:
13 For more information:
14 https://mercurial-scm.org/wiki/RebaseExtension
14 https://mercurial-scm.org/wiki/RebaseExtension
15 '''
15 '''
16
16
17 from __future__ import absolute_import
17 from __future__ import absolute_import
18
18
19 import errno
19 import errno
20 import os
20 import os
21
21
22 from mercurial.i18n import _
22 from mercurial.i18n import _
23 from mercurial.node import (
23 from mercurial.node import (
24 nullrev,
24 nullrev,
25 short,
25 short,
26 )
26 )
27 from mercurial.pycompat import open
27 from mercurial.pycompat import open
28 from mercurial import (
28 from mercurial import (
29 bookmarks,
29 bookmarks,
30 cmdutil,
30 cmdutil,
31 commands,
31 commands,
32 copies,
32 copies,
33 destutil,
33 destutil,
34 dirstateguard,
34 dirstateguard,
35 error,
35 error,
36 extensions,
36 extensions,
37 hg,
37 hg,
38 merge as mergemod,
38 merge as mergemod,
39 mergestate as mergestatemod,
39 mergestate as mergestatemod,
40 mergeutil,
40 mergeutil,
41 node as nodemod,
41 node as nodemod,
42 obsolete,
42 obsolete,
43 obsutil,
43 obsutil,
44 patch,
44 patch,
45 phases,
45 phases,
46 pycompat,
46 pycompat,
47 registrar,
47 registrar,
48 repair,
48 repair,
49 revset,
49 revset,
50 revsetlang,
50 revsetlang,
51 rewriteutil,
51 rewriteutil,
52 scmutil,
52 scmutil,
53 smartset,
53 smartset,
54 state as statemod,
54 state as statemod,
55 util,
55 util,
56 )
56 )
57
57
# Constants used throughout the rebase module.  The ordering of their
# values must be maintained.

# Marker value: this revision still needs to be rebased.
revtodo = -1
revtodostr = b'-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {b'-2', b'-3', b'-4', b'-5'}

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'
76
76
77
77
78 def _nothingtorebase():
78 def _nothingtorebase():
79 return 1
79 return 1
80
80
81
81
82 def _savegraft(ctx, extra):
82 def _savegraft(ctx, extra):
83 s = ctx.extra().get(b'source', None)
83 s = ctx.extra().get(b'source', None)
84 if s is not None:
84 if s is not None:
85 extra[b'source'] = s
85 extra[b'source'] = s
86 s = ctx.extra().get(b'intermediate-source', None)
86 s = ctx.extra().get(b'intermediate-source', None)
87 if s is not None:
87 if s is not None:
88 extra[b'intermediate-source'] = s
88 extra[b'intermediate-source'] = s
89
89
90
90
91 def _savebranch(ctx, extra):
91 def _savebranch(ctx, extra):
92 extra[b'branch'] = ctx.branch()
92 extra[b'branch'] = ctx.branch()
93
93
94
94
def _destrebase(repo, sourceset, destspace=None):
    """Thin wrapper around destutil.destmerge passing rebase-specific args.

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(
        repo,
        action=b'rebase',
        onheadcheck=False,
        sourceset=sourceset,
        destspace=destspace,
    )
106
106
107
107
revsetpredicate = registrar.revsetpredicate()


@revsetpredicate(b'_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``
    # Default destination for rebase.
    # XXX: Currently private because I expect the signature to change.
    # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    if x is None:
        sourceset = None
    else:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    dest = _destrebase(repo, sourceset)
    return subset & smartset.baseset([dest])
123
123
124
124
@revsetpredicate(b'_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    # ``_destautoorphanrebase()``
    # Automatic rebase destination for a single orphan revision.
    obsoleted = repo.unfiltered().revs(b'obsolete()')

    src = revset.getset(repo, subset, x).first()

    # An empty source, or one that is itself obsolete, has no destination.
    if not src or src in obsoleted:
        return smartset.baseset()

    dests = destutil.orphanpossibledestination(repo, src)
    if len(dests) > 1:
        raise error.Abort(
            _(b"ambiguous automatic rebase: %r could end up on any of %r")
            % (src, dests)
        )
    # Zero or one destination left, so it can be returned directly.
    return smartset.baseset(dests)
146
146
147
147
def _ctxdesc(ctx):
    """Return a short byte-string description for a changectx.

    Format: ``rev:shortnode "first line of description" (names...)``,
    where names come from every namespace except ``branches``.
    """
    repo = ctx.repo()
    firstline = ctx.description().split(b'\n', 1)[0]
    desc = b'%d:%s "%s"' % (ctx.rev(), ctx, firstline)
    labels = []
    for nsname, ns in pycompat.iteritems(repo.names):
        # branch names are intentionally left out of the summary
        if nsname == b'branches':
            continue
        labels.extend(ns.names(repo, ctx.node()))
    if labels:
        desc += b' (%s)' % b' '.join(labels)
    return desc
164
164
165
165
166 class rebaseruntime(object):
166 class rebaseruntime(object):
167 """This class is a container for rebase runtime state"""
167 """This class is a container for rebase runtime state"""
168
168
169 def __init__(self, repo, ui, inmemory=False, opts=None):
169 def __init__(self, repo, ui, inmemory=False, opts=None):
170 if opts is None:
170 if opts is None:
171 opts = {}
171 opts = {}
172
172
173 # prepared: whether we have rebasestate prepared or not. Currently it
173 # prepared: whether we have rebasestate prepared or not. Currently it
174 # decides whether "self.repo" is unfiltered or not.
174 # decides whether "self.repo" is unfiltered or not.
175 # The rebasestate has explicit hash to hash instructions not depending
175 # The rebasestate has explicit hash to hash instructions not depending
176 # on visibility. If rebasestate exists (in-memory or on-disk), use
176 # on visibility. If rebasestate exists (in-memory or on-disk), use
177 # unfiltered repo to avoid visibility issues.
177 # unfiltered repo to avoid visibility issues.
178 # Before knowing rebasestate (i.e. when starting a new rebase (not
178 # Before knowing rebasestate (i.e. when starting a new rebase (not
179 # --continue or --abort)), the original repo should be used so
179 # --continue or --abort)), the original repo should be used so
180 # visibility-dependent revsets are correct.
180 # visibility-dependent revsets are correct.
181 self.prepared = False
181 self.prepared = False
182 self.resume = False
182 self.resume = False
183 self._repo = repo
183 self._repo = repo
184
184
185 self.ui = ui
185 self.ui = ui
186 self.opts = opts
186 self.opts = opts
187 self.originalwd = None
187 self.originalwd = None
188 self.external = nullrev
188 self.external = nullrev
189 # Mapping between the old revision id and either what is the new rebased
189 # Mapping between the old revision id and either what is the new rebased
190 # revision or what needs to be done with the old revision. The state
190 # revision or what needs to be done with the old revision. The state
191 # dict will be what contains most of the rebase progress state.
191 # dict will be what contains most of the rebase progress state.
192 self.state = {}
192 self.state = {}
193 self.activebookmark = None
193 self.activebookmark = None
194 self.destmap = {}
194 self.destmap = {}
195 self.skipped = set()
195 self.skipped = set()
196
196
197 self.collapsef = opts.get(b'collapse', False)
197 self.collapsef = opts.get(b'collapse', False)
198 self.collapsemsg = cmdutil.logmessage(ui, opts)
198 self.collapsemsg = cmdutil.logmessage(ui, opts)
199 self.date = opts.get(b'date', None)
199 self.date = opts.get(b'date', None)
200
200
201 e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
201 e = opts.get(b'extrafn') # internal, used by e.g. hgsubversion
202 self.extrafns = [_savegraft]
202 self.extrafns = [_savegraft]
203 if e:
203 if e:
204 self.extrafns = [e]
204 self.extrafns = [e]
205
205
206 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
206 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
207 self.keepf = opts.get(b'keep', False)
207 self.keepf = opts.get(b'keep', False)
208 self.keepbranchesf = opts.get(b'keepbranches', False)
208 self.keepbranchesf = opts.get(b'keepbranches', False)
209 self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
209 self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
210 repo.ui, b'rebase'
210 repo.ui, b'rebase'
211 )
211 )
212 self.obsoletenotrebased = {}
212 self.obsoletenotrebased = {}
213 self.obsoletewithoutsuccessorindestination = set()
213 self.obsoletewithoutsuccessorindestination = set()
214 self.inmemory = inmemory
214 self.inmemory = inmemory
215 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
215 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
216
216
217 @property
217 @property
218 def repo(self):
218 def repo(self):
219 if self.prepared:
219 if self.prepared:
220 return self._repo.unfiltered()
220 return self._repo.unfiltered()
221 else:
221 else:
222 return self._repo
222 return self._repo
223
223
224 def storestatus(self, tr=None):
224 def storestatus(self, tr=None):
225 """Store the current status to allow recovery"""
225 """Store the current status to allow recovery"""
226 if tr:
226 if tr:
227 tr.addfilegenerator(
227 tr.addfilegenerator(
228 b'rebasestate',
228 b'rebasestate',
229 (b'rebasestate',),
229 (b'rebasestate',),
230 self._writestatus,
230 self._writestatus,
231 location=b'plain',
231 location=b'plain',
232 )
232 )
233 else:
233 else:
234 with self.repo.vfs(b"rebasestate", b"w") as f:
234 with self.repo.vfs(b"rebasestate", b"w") as f:
235 self._writestatus(f)
235 self._writestatus(f)
236
236
237 def _writestatus(self, f):
237 def _writestatus(self, f):
238 repo = self.repo
238 repo = self.repo
239 assert repo.filtername is None
239 assert repo.filtername is None
240 f.write(repo[self.originalwd].hex() + b'\n')
240 f.write(repo[self.originalwd].hex() + b'\n')
241 # was "dest". we now write dest per src root below.
241 # was "dest". we now write dest per src root below.
242 f.write(b'\n')
242 f.write(b'\n')
243 f.write(repo[self.external].hex() + b'\n')
243 f.write(repo[self.external].hex() + b'\n')
244 f.write(b'%d\n' % int(self.collapsef))
244 f.write(b'%d\n' % int(self.collapsef))
245 f.write(b'%d\n' % int(self.keepf))
245 f.write(b'%d\n' % int(self.keepf))
246 f.write(b'%d\n' % int(self.keepbranchesf))
246 f.write(b'%d\n' % int(self.keepbranchesf))
247 f.write(b'%s\n' % (self.activebookmark or b''))
247 f.write(b'%s\n' % (self.activebookmark or b''))
248 destmap = self.destmap
248 destmap = self.destmap
249 for d, v in pycompat.iteritems(self.state):
249 for d, v in pycompat.iteritems(self.state):
250 oldrev = repo[d].hex()
250 oldrev = repo[d].hex()
251 if v >= 0:
251 if v >= 0:
252 newrev = repo[v].hex()
252 newrev = repo[v].hex()
253 else:
253 else:
254 newrev = b"%d" % v
254 newrev = b"%d" % v
255 destnode = repo[destmap[d]].hex()
255 destnode = repo[destmap[d]].hex()
256 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
256 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
257 repo.ui.debug(b'rebase status stored\n')
257 repo.ui.debug(b'rebase status stored\n')
258
258
259 def restorestatus(self):
259 def restorestatus(self):
260 """Restore a previously stored status"""
260 """Restore a previously stored status"""
261 if not self.stateobj.exists():
261 if not self.stateobj.exists():
262 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
262 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
263
263
264 data = self._read()
264 data = self._read()
265 self.repo.ui.debug(b'rebase status resumed\n')
265 self.repo.ui.debug(b'rebase status resumed\n')
266
266
267 self.originalwd = data[b'originalwd']
267 self.originalwd = data[b'originalwd']
268 self.destmap = data[b'destmap']
268 self.destmap = data[b'destmap']
269 self.state = data[b'state']
269 self.state = data[b'state']
270 self.skipped = data[b'skipped']
270 self.skipped = data[b'skipped']
271 self.collapsef = data[b'collapse']
271 self.collapsef = data[b'collapse']
272 self.keepf = data[b'keep']
272 self.keepf = data[b'keep']
273 self.keepbranchesf = data[b'keepbranches']
273 self.keepbranchesf = data[b'keepbranches']
274 self.external = data[b'external']
274 self.external = data[b'external']
275 self.activebookmark = data[b'activebookmark']
275 self.activebookmark = data[b'activebookmark']
276
276
277 def _read(self):
277 def _read(self):
278 self.prepared = True
278 self.prepared = True
279 repo = self.repo
279 repo = self.repo
280 assert repo.filtername is None
280 assert repo.filtername is None
281 data = {
281 data = {
282 b'keepbranches': None,
282 b'keepbranches': None,
283 b'collapse': None,
283 b'collapse': None,
284 b'activebookmark': None,
284 b'activebookmark': None,
285 b'external': nullrev,
285 b'external': nullrev,
286 b'keep': None,
286 b'keep': None,
287 b'originalwd': None,
287 b'originalwd': None,
288 }
288 }
289 legacydest = None
289 legacydest = None
290 state = {}
290 state = {}
291 destmap = {}
291 destmap = {}
292
292
293 if True:
293 if True:
294 f = repo.vfs(b"rebasestate")
294 f = repo.vfs(b"rebasestate")
295 for i, l in enumerate(f.read().splitlines()):
295 for i, l in enumerate(f.read().splitlines()):
296 if i == 0:
296 if i == 0:
297 data[b'originalwd'] = repo[l].rev()
297 data[b'originalwd'] = repo[l].rev()
298 elif i == 1:
298 elif i == 1:
299 # this line should be empty in newer version. but legacy
299 # this line should be empty in newer version. but legacy
300 # clients may still use it
300 # clients may still use it
301 if l:
301 if l:
302 legacydest = repo[l].rev()
302 legacydest = repo[l].rev()
303 elif i == 2:
303 elif i == 2:
304 data[b'external'] = repo[l].rev()
304 data[b'external'] = repo[l].rev()
305 elif i == 3:
305 elif i == 3:
306 data[b'collapse'] = bool(int(l))
306 data[b'collapse'] = bool(int(l))
307 elif i == 4:
307 elif i == 4:
308 data[b'keep'] = bool(int(l))
308 data[b'keep'] = bool(int(l))
309 elif i == 5:
309 elif i == 5:
310 data[b'keepbranches'] = bool(int(l))
310 data[b'keepbranches'] = bool(int(l))
311 elif i == 6 and not (len(l) == 81 and b':' in l):
311 elif i == 6 and not (len(l) == 81 and b':' in l):
312 # line 6 is a recent addition, so for backwards
312 # line 6 is a recent addition, so for backwards
313 # compatibility check that the line doesn't look like the
313 # compatibility check that the line doesn't look like the
314 # oldrev:newrev lines
314 # oldrev:newrev lines
315 data[b'activebookmark'] = l
315 data[b'activebookmark'] = l
316 else:
316 else:
317 args = l.split(b':')
317 args = l.split(b':')
318 oldrev = repo[args[0]].rev()
318 oldrev = repo[args[0]].rev()
319 newrev = args[1]
319 newrev = args[1]
320 if newrev in legacystates:
320 if newrev in legacystates:
321 continue
321 continue
322 if len(args) > 2:
322 if len(args) > 2:
323 destrev = repo[args[2]].rev()
323 destrev = repo[args[2]].rev()
324 else:
324 else:
325 destrev = legacydest
325 destrev = legacydest
326 destmap[oldrev] = destrev
326 destmap[oldrev] = destrev
327 if newrev == revtodostr:
327 if newrev == revtodostr:
328 state[oldrev] = revtodo
328 state[oldrev] = revtodo
329 # Legacy compat special case
329 # Legacy compat special case
330 else:
330 else:
331 state[oldrev] = repo[newrev].rev()
331 state[oldrev] = repo[newrev].rev()
332
332
333 if data[b'keepbranches'] is None:
333 if data[b'keepbranches'] is None:
334 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
334 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
335
335
336 data[b'destmap'] = destmap
336 data[b'destmap'] = destmap
337 data[b'state'] = state
337 data[b'state'] = state
338 skipped = set()
338 skipped = set()
339 # recompute the set of skipped revs
339 # recompute the set of skipped revs
340 if not data[b'collapse']:
340 if not data[b'collapse']:
341 seen = set(destmap.values())
341 seen = set(destmap.values())
342 for old, new in sorted(state.items()):
342 for old, new in sorted(state.items()):
343 if new != revtodo and new in seen:
343 if new != revtodo and new in seen:
344 skipped.add(old)
344 skipped.add(old)
345 seen.add(new)
345 seen.add(new)
346 data[b'skipped'] = skipped
346 data[b'skipped'] = skipped
347 repo.ui.debug(
347 repo.ui.debug(
348 b'computed skipped revs: %s\n'
348 b'computed skipped revs: %s\n'
349 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
349 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
350 )
350 )
351
351
352 return data
352 return data
353
353
354 def _handleskippingobsolete(self, obsoleterevs, destmap):
354 def _handleskippingobsolete(self, obsoleterevs, destmap):
355 """Compute structures necessary for skipping obsolete revisions
355 """Compute structures necessary for skipping obsolete revisions
356
356
357 obsoleterevs: iterable of all obsolete revisions in rebaseset
357 obsoleterevs: iterable of all obsolete revisions in rebaseset
358 destmap: {srcrev: destrev} destination revisions
358 destmap: {srcrev: destrev} destination revisions
359 """
359 """
360 self.obsoletenotrebased = {}
360 self.obsoletenotrebased = {}
361 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
361 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
362 return
362 return
363 obsoleteset = set(obsoleterevs)
363 obsoleteset = set(obsoleterevs)
364 (
364 (
365 self.obsoletenotrebased,
365 self.obsoletenotrebased,
366 self.obsoletewithoutsuccessorindestination,
366 self.obsoletewithoutsuccessorindestination,
367 obsoleteextinctsuccessors,
367 obsoleteextinctsuccessors,
368 ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
368 ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
369 skippedset = set(self.obsoletenotrebased)
369 skippedset = set(self.obsoletenotrebased)
370 skippedset.update(self.obsoletewithoutsuccessorindestination)
370 skippedset.update(self.obsoletewithoutsuccessorindestination)
371 skippedset.update(obsoleteextinctsuccessors)
371 skippedset.update(obsoleteextinctsuccessors)
372 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
372 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
373
373
374 def _prepareabortorcontinue(
374 def _prepareabortorcontinue(
375 self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
375 self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
376 ):
376 ):
377 self.resume = True
377 self.resume = True
378 try:
378 try:
379 self.restorestatus()
379 self.restorestatus()
380 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
380 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
381 except error.RepoLookupError:
381 except error.RepoLookupError:
382 if isabort:
382 if isabort:
383 clearstatus(self.repo)
383 clearstatus(self.repo)
384 clearcollapsemsg(self.repo)
384 clearcollapsemsg(self.repo)
385 self.repo.ui.warn(
385 self.repo.ui.warn(
386 _(
386 _(
387 b'rebase aborted (no revision is removed,'
387 b'rebase aborted (no revision is removed,'
388 b' only broken state is cleared)\n'
388 b' only broken state is cleared)\n'
389 )
389 )
390 )
390 )
391 return 0
391 return 0
392 else:
392 else:
393 msg = _(b'cannot continue inconsistent rebase')
393 msg = _(b'cannot continue inconsistent rebase')
394 hint = _(b'use "hg rebase --abort" to clear broken state')
394 hint = _(b'use "hg rebase --abort" to clear broken state')
395 raise error.Abort(msg, hint=hint)
395 raise error.Abort(msg, hint=hint)
396
396
397 if isabort:
397 if isabort:
398 backup = backup and self.backupf
398 backup = backup and self.backupf
399 return self._abort(
399 return self._abort(
400 backup=backup,
400 backup=backup,
401 suppwarns=suppwarns,
401 suppwarns=suppwarns,
402 dryrun=dryrun,
402 dryrun=dryrun,
403 confirm=confirm,
403 confirm=confirm,
404 )
404 )
405
405
406 def _preparenewrebase(self, destmap):
406 def _preparenewrebase(self, destmap):
407 if not destmap:
407 if not destmap:
408 return _nothingtorebase()
408 return _nothingtorebase()
409
409
410 rebaseset = destmap.keys()
410 rebaseset = destmap.keys()
411 if not self.keepf:
411 if not self.keepf:
412 try:
412 try:
413 rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
413 rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
414 except error.Abort as e:
414 except error.Abort as e:
415 if e.hint is None:
415 if e.hint is None:
416 e.hint = _(b'use --keep to keep original changesets')
416 e.hint = _(b'use --keep to keep original changesets')
417 raise e
417 raise e
418
418
419 result = buildstate(self.repo, destmap, self.collapsef)
419 result = buildstate(self.repo, destmap, self.collapsef)
420
420
421 if not result:
421 if not result:
422 # Empty state built, nothing to rebase
422 # Empty state built, nothing to rebase
423 self.ui.status(_(b'nothing to rebase\n'))
423 self.ui.status(_(b'nothing to rebase\n'))
424 return _nothingtorebase()
424 return _nothingtorebase()
425
425
426 (self.originalwd, self.destmap, self.state) = result
426 (self.originalwd, self.destmap, self.state) = result
427 if self.collapsef:
427 if self.collapsef:
428 dests = set(self.destmap.values())
428 dests = set(self.destmap.values())
429 if len(dests) != 1:
429 if len(dests) != 1:
430 raise error.Abort(
430 raise error.Abort(
431 _(b'--collapse does not work with multiple destinations')
431 _(b'--collapse does not work with multiple destinations')
432 )
432 )
433 destrev = next(iter(dests))
433 destrev = next(iter(dests))
434 destancestors = self.repo.changelog.ancestors(
434 destancestors = self.repo.changelog.ancestors(
435 [destrev], inclusive=True
435 [destrev], inclusive=True
436 )
436 )
437 self.external = externalparent(self.repo, self.state, destancestors)
437 self.external = externalparent(self.repo, self.state, destancestors)
438
438
439 for destrev in sorted(set(destmap.values())):
439 for destrev in sorted(set(destmap.values())):
440 dest = self.repo[destrev]
440 dest = self.repo[destrev]
441 if dest.closesbranch() and not self.keepbranchesf:
441 if dest.closesbranch() and not self.keepbranchesf:
442 self.ui.status(_(b'reopening closed branch head %s\n') % dest)
442 self.ui.status(_(b'reopening closed branch head %s\n') % dest)
443
443
444 self.prepared = True
444 self.prepared = True
445
445
446 def _assignworkingcopy(self):
446 def _assignworkingcopy(self):
447 if self.inmemory:
447 if self.inmemory:
448 from mercurial.context import overlayworkingctx
448 from mercurial.context import overlayworkingctx
449
449
450 self.wctx = overlayworkingctx(self.repo)
450 self.wctx = overlayworkingctx(self.repo)
451 self.repo.ui.debug(b"rebasing in-memory\n")
451 self.repo.ui.debug(b"rebasing in-memory\n")
452 else:
452 else:
453 self.wctx = self.repo[None]
453 self.wctx = self.repo[None]
454 self.repo.ui.debug(b"rebasing on disk\n")
454 self.repo.ui.debug(b"rebasing on disk\n")
455 self.repo.ui.log(
455 self.repo.ui.log(
456 b"rebase",
456 b"rebase",
457 b"using in-memory rebase: %r\n",
457 b"using in-memory rebase: %r\n",
458 self.inmemory,
458 self.inmemory,
459 rebase_imm_used=self.inmemory,
459 rebase_imm_used=self.inmemory,
460 )
460 )
461
461
462 def _performrebase(self, tr):
462 def _performrebase(self, tr):
463 self._assignworkingcopy()
463 self._assignworkingcopy()
464 repo, ui = self.repo, self.ui
464 repo, ui = self.repo, self.ui
465 if self.keepbranchesf:
465 if self.keepbranchesf:
466 # insert _savebranch at the start of extrafns so if
466 # insert _savebranch at the start of extrafns so if
467 # there's a user-provided extrafn it can clobber branch if
467 # there's a user-provided extrafn it can clobber branch if
468 # desired
468 # desired
469 self.extrafns.insert(0, _savebranch)
469 self.extrafns.insert(0, _savebranch)
470 if self.collapsef:
470 if self.collapsef:
471 branches = set()
471 branches = set()
472 for rev in self.state:
472 for rev in self.state:
473 branches.add(repo[rev].branch())
473 branches.add(repo[rev].branch())
474 if len(branches) > 1:
474 if len(branches) > 1:
475 raise error.Abort(
475 raise error.Abort(
476 _(b'cannot collapse multiple named branches')
476 _(b'cannot collapse multiple named branches')
477 )
477 )
478
478
479 # Calculate self.obsoletenotrebased
479 # Calculate self.obsoletenotrebased
480 obsrevs = _filterobsoleterevs(self.repo, self.state)
480 obsrevs = _filterobsoleterevs(self.repo, self.state)
481 self._handleskippingobsolete(obsrevs, self.destmap)
481 self._handleskippingobsolete(obsrevs, self.destmap)
482
482
483 # Keep track of the active bookmarks in order to reset them later
483 # Keep track of the active bookmarks in order to reset them later
484 self.activebookmark = self.activebookmark or repo._activebookmark
484 self.activebookmark = self.activebookmark or repo._activebookmark
485 if self.activebookmark:
485 if self.activebookmark:
486 bookmarks.deactivate(repo)
486 bookmarks.deactivate(repo)
487
487
488 # Store the state before we begin so users can run 'hg rebase --abort'
488 # Store the state before we begin so users can run 'hg rebase --abort'
489 # if we fail before the transaction closes.
489 # if we fail before the transaction closes.
490 self.storestatus()
490 self.storestatus()
491 if tr:
491 if tr:
492 # When using single transaction, store state when transaction
492 # When using single transaction, store state when transaction
493 # commits.
493 # commits.
494 self.storestatus(tr)
494 self.storestatus(tr)
495
495
496 cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
496 cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
497 p = repo.ui.makeprogress(
497 p = repo.ui.makeprogress(
498 _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
498 _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
499 )
499 )
500
500
501 def progress(ctx):
501 def progress(ctx):
502 p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
502 p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))
503
503
504 allowdivergence = self.ui.configbool(
504 allowdivergence = self.ui.configbool(
505 b'experimental', b'evolution.allowdivergence'
505 b'experimental', b'evolution.allowdivergence'
506 )
506 )
507 for subset in sortsource(self.destmap):
507 for subset in sortsource(self.destmap):
508 sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
508 sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
509 if not allowdivergence:
509 if not allowdivergence:
510 sortedrevs -= self.repo.revs(
510 sortedrevs -= self.repo.revs(
511 b'descendants(%ld) and not %ld',
511 b'descendants(%ld) and not %ld',
512 self.obsoletewithoutsuccessorindestination,
512 self.obsoletewithoutsuccessorindestination,
513 self.obsoletewithoutsuccessorindestination,
513 self.obsoletewithoutsuccessorindestination,
514 )
514 )
515 for rev in sortedrevs:
515 for rev in sortedrevs:
516 self._rebasenode(tr, rev, allowdivergence, progress)
516 self._rebasenode(tr, rev, allowdivergence, progress)
517 p.complete()
517 p.complete()
518 ui.note(_(b'rebase merging completed\n'))
518 ui.note(_(b'rebase merging completed\n'))
519
519
520 def _concludenode(self, rev, p1, editor, commitmsg=None):
520 def _concludenode(self, rev, p1, editor, commitmsg=None):
521 '''Commit the wd changes with parents p1 and p2.
521 '''Commit the wd changes with parents p1 and p2.
522
522
523 Reuse commit info from rev but also store useful information in extra.
523 Reuse commit info from rev but also store useful information in extra.
524 Return node of committed revision.'''
524 Return node of committed revision.'''
525 repo = self.repo
525 repo = self.repo
526 ctx = repo[rev]
526 ctx = repo[rev]
527 if commitmsg is None:
527 if commitmsg is None:
528 commitmsg = ctx.description()
528 commitmsg = ctx.description()
529 date = self.date
529 date = self.date
530 if date is None:
530 if date is None:
531 date = ctx.date()
531 date = ctx.date()
532 extra = {b'rebase_source': ctx.hex()}
532 extra = {b'rebase_source': ctx.hex()}
533 for c in self.extrafns:
533 for c in self.extrafns:
534 c(ctx, extra)
534 c(ctx, extra)
535 destphase = max(ctx.phase(), phases.draft)
535 destphase = max(ctx.phase(), phases.draft)
536 overrides = {
536 overrides = {
537 (b'phases', b'new-commit'): destphase,
537 (b'phases', b'new-commit'): destphase,
538 (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf,
538 (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf,
539 }
539 }
540 with repo.ui.configoverride(overrides, b'rebase'):
540 with repo.ui.configoverride(overrides, b'rebase'):
541 if self.inmemory:
541 if self.inmemory:
542 newnode = commitmemorynode(
542 newnode = commitmemorynode(
543 repo,
543 repo,
544 wctx=self.wctx,
544 wctx=self.wctx,
545 extra=extra,
545 extra=extra,
546 commitmsg=commitmsg,
546 commitmsg=commitmsg,
547 editor=editor,
547 editor=editor,
548 user=ctx.user(),
548 user=ctx.user(),
549 date=date,
549 date=date,
550 )
550 )
551 mergestatemod.mergestate.clean(repo)
551 mergestatemod.mergestate.clean(repo)
552 else:
552 else:
553 newnode = commitnode(
553 newnode = commitnode(
554 repo,
554 repo,
555 extra=extra,
555 extra=extra,
556 commitmsg=commitmsg,
556 commitmsg=commitmsg,
557 editor=editor,
557 editor=editor,
558 user=ctx.user(),
558 user=ctx.user(),
559 date=date,
559 date=date,
560 )
560 )
561
561
562 return newnode
562 return newnode
563
563
564 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
564 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
565 repo, ui, opts = self.repo, self.ui, self.opts
565 repo, ui, opts = self.repo, self.ui, self.opts
566 dest = self.destmap[rev]
566 dest = self.destmap[rev]
567 ctx = repo[rev]
567 ctx = repo[rev]
568 desc = _ctxdesc(ctx)
568 desc = _ctxdesc(ctx)
569 if self.state[rev] == rev:
569 if self.state[rev] == rev:
570 ui.status(_(b'already rebased %s\n') % desc)
570 ui.status(_(b'already rebased %s\n') % desc)
571 elif (
571 elif (
572 not allowdivergence
572 not allowdivergence
573 and rev in self.obsoletewithoutsuccessorindestination
573 and rev in self.obsoletewithoutsuccessorindestination
574 ):
574 ):
575 msg = (
575 msg = (
576 _(
576 _(
577 b'note: not rebasing %s and its descendants as '
577 b'note: not rebasing %s and its descendants as '
578 b'this would cause divergence\n'
578 b'this would cause divergence\n'
579 )
579 )
580 % desc
580 % desc
581 )
581 )
582 repo.ui.status(msg)
582 repo.ui.status(msg)
583 self.skipped.add(rev)
583 self.skipped.add(rev)
584 elif rev in self.obsoletenotrebased:
584 elif rev in self.obsoletenotrebased:
585 succ = self.obsoletenotrebased[rev]
585 succ = self.obsoletenotrebased[rev]
586 if succ is None:
586 if succ is None:
587 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
587 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
588 else:
588 else:
589 succdesc = _ctxdesc(repo[succ])
589 succdesc = _ctxdesc(repo[succ])
590 msg = _(
590 msg = _(
591 b'note: not rebasing %s, already in destination as %s\n'
591 b'note: not rebasing %s, already in destination as %s\n'
592 ) % (desc, succdesc)
592 ) % (desc, succdesc)
593 repo.ui.status(msg)
593 repo.ui.status(msg)
594 # Make clearrebased aware state[rev] is not a true successor
594 # Make clearrebased aware state[rev] is not a true successor
595 self.skipped.add(rev)
595 self.skipped.add(rev)
596 # Record rev as moved to its desired destination in self.state.
596 # Record rev as moved to its desired destination in self.state.
597 # This helps bookmark and working parent movement.
597 # This helps bookmark and working parent movement.
598 dest = max(
598 dest = max(
599 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
599 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
600 )
600 )
601 self.state[rev] = dest
601 self.state[rev] = dest
602 elif self.state[rev] == revtodo:
602 elif self.state[rev] == revtodo:
603 ui.status(_(b'rebasing %s\n') % desc)
603 ui.status(_(b'rebasing %s\n') % desc)
604 progressfn(ctx)
604 progressfn(ctx)
605 p1, p2, base = defineparents(
605 p1, p2, base = defineparents(
606 repo,
606 repo,
607 rev,
607 rev,
608 self.destmap,
608 self.destmap,
609 self.state,
609 self.state,
610 self.skipped,
610 self.skipped,
611 self.obsoletenotrebased,
611 self.obsoletenotrebased,
612 )
612 )
613 if self.resume and self.wctx.p1().rev() == p1:
613 if self.resume and self.wctx.p1().rev() == p1:
614 repo.ui.debug(b'resuming interrupted rebase\n')
614 repo.ui.debug(b'resuming interrupted rebase\n')
615 self.resume = False
615 self.resume = False
616 else:
616 else:
617 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
617 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
618 with ui.configoverride(overrides, b'rebase'):
618 with ui.configoverride(overrides, b'rebase'):
619 stats = rebasenode(
619 stats = rebasenode(
620 repo,
620 repo,
621 rev,
621 rev,
622 p1,
622 p1,
623 p2,
623 p2,
624 base,
624 base,
625 self.collapsef,
625 self.collapsef,
626 dest,
626 dest,
627 wctx=self.wctx,
627 wctx=self.wctx,
628 )
628 )
629 if stats.unresolvedcount > 0:
629 if stats.unresolvedcount > 0:
630 if self.inmemory:
630 if self.inmemory:
631 raise error.InMemoryMergeConflictsError()
631 raise error.InMemoryMergeConflictsError()
632 else:
632 else:
633 raise error.InterventionRequired(
633 raise error.ConflictResolutionRequired(b'rebase')
634 _(
635 b"unresolved conflicts (see 'hg "
636 b"resolve', then 'hg rebase --continue')"
637 )
638 )
639 if not self.collapsef:
634 if not self.collapsef:
640 merging = p2 != nullrev
635 merging = p2 != nullrev
641 editform = cmdutil.mergeeditform(merging, b'rebase')
636 editform = cmdutil.mergeeditform(merging, b'rebase')
642 editor = cmdutil.getcommiteditor(
637 editor = cmdutil.getcommiteditor(
643 editform=editform, **pycompat.strkwargs(opts)
638 editform=editform, **pycompat.strkwargs(opts)
644 )
639 )
645 # We need to set parents again here just in case we're continuing
640 # We need to set parents again here just in case we're continuing
646 # a rebase started with an old hg version (before 9c9cfecd4600),
641 # a rebase started with an old hg version (before 9c9cfecd4600),
647 # because those old versions would have left us with two dirstate
642 # because those old versions would have left us with two dirstate
648 # parents, and we don't want to create a merge commit here (unless
643 # parents, and we don't want to create a merge commit here (unless
649 # we're rebasing a merge commit).
644 # we're rebasing a merge commit).
650 self.wctx.setparents(repo[p1].node(), repo[p2].node())
645 self.wctx.setparents(repo[p1].node(), repo[p2].node())
651 newnode = self._concludenode(rev, p1, editor)
646 newnode = self._concludenode(rev, p1, editor)
652 else:
647 else:
653 # Skip commit if we are collapsing
648 # Skip commit if we are collapsing
654 newnode = None
649 newnode = None
655 # Update the state
650 # Update the state
656 if newnode is not None:
651 if newnode is not None:
657 self.state[rev] = repo[newnode].rev()
652 self.state[rev] = repo[newnode].rev()
658 ui.debug(b'rebased as %s\n' % short(newnode))
653 ui.debug(b'rebased as %s\n' % short(newnode))
659 if repo[newnode].isempty():
654 if repo[newnode].isempty():
660 ui.warn(
655 ui.warn(
661 _(
656 _(
662 b'note: created empty successor for %s, its '
657 b'note: created empty successor for %s, its '
663 b'destination already has all its changes\n'
658 b'destination already has all its changes\n'
664 )
659 )
665 % desc
660 % desc
666 )
661 )
667 else:
662 else:
668 if not self.collapsef:
663 if not self.collapsef:
669 ui.warn(
664 ui.warn(
670 _(
665 _(
671 b'note: not rebasing %s, its destination already '
666 b'note: not rebasing %s, its destination already '
672 b'has all its changes\n'
667 b'has all its changes\n'
673 )
668 )
674 % desc
669 % desc
675 )
670 )
676 self.skipped.add(rev)
671 self.skipped.add(rev)
677 self.state[rev] = p1
672 self.state[rev] = p1
678 ui.debug(b'next revision set to %d\n' % p1)
673 ui.debug(b'next revision set to %d\n' % p1)
679 else:
674 else:
680 ui.status(
675 ui.status(
681 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
676 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
682 )
677 )
683 if not tr:
678 if not tr:
684 # When not using single transaction, store state after each
679 # When not using single transaction, store state after each
685 # commit is completely done. On InterventionRequired, we thus
680 # commit is completely done. On InterventionRequired, we thus
686 # won't store the status. Instead, we'll hit the "len(parents) == 2"
681 # won't store the status. Instead, we'll hit the "len(parents) == 2"
687 # case and realize that the commit was in progress.
682 # case and realize that the commit was in progress.
688 self.storestatus()
683 self.storestatus()
689
684
690 def _finishrebase(self):
685 def _finishrebase(self):
691 repo, ui, opts = self.repo, self.ui, self.opts
686 repo, ui, opts = self.repo, self.ui, self.opts
692 fm = ui.formatter(b'rebase', opts)
687 fm = ui.formatter(b'rebase', opts)
693 fm.startitem()
688 fm.startitem()
694 if self.collapsef:
689 if self.collapsef:
695 p1, p2, _base = defineparents(
690 p1, p2, _base = defineparents(
696 repo,
691 repo,
697 min(self.state),
692 min(self.state),
698 self.destmap,
693 self.destmap,
699 self.state,
694 self.state,
700 self.skipped,
695 self.skipped,
701 self.obsoletenotrebased,
696 self.obsoletenotrebased,
702 )
697 )
703 editopt = opts.get(b'edit')
698 editopt = opts.get(b'edit')
704 editform = b'rebase.collapse'
699 editform = b'rebase.collapse'
705 if self.collapsemsg:
700 if self.collapsemsg:
706 commitmsg = self.collapsemsg
701 commitmsg = self.collapsemsg
707 else:
702 else:
708 commitmsg = b'Collapsed revision'
703 commitmsg = b'Collapsed revision'
709 for rebased in sorted(self.state):
704 for rebased in sorted(self.state):
710 if rebased not in self.skipped:
705 if rebased not in self.skipped:
711 commitmsg += b'\n* %s' % repo[rebased].description()
706 commitmsg += b'\n* %s' % repo[rebased].description()
712 editopt = True
707 editopt = True
713 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
708 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
714 revtoreuse = max(self.state)
709 revtoreuse = max(self.state)
715
710
716 self.wctx.setparents(repo[p1].node(), repo[self.external].node())
711 self.wctx.setparents(repo[p1].node(), repo[self.external].node())
717 newnode = self._concludenode(
712 newnode = self._concludenode(
718 revtoreuse, p1, editor, commitmsg=commitmsg
713 revtoreuse, p1, editor, commitmsg=commitmsg
719 )
714 )
720
715
721 if newnode is not None:
716 if newnode is not None:
722 newrev = repo[newnode].rev()
717 newrev = repo[newnode].rev()
723 for oldrev in self.state:
718 for oldrev in self.state:
724 self.state[oldrev] = newrev
719 self.state[oldrev] = newrev
725
720
726 if b'qtip' in repo.tags():
721 if b'qtip' in repo.tags():
727 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
722 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
728
723
729 # restore original working directory
724 # restore original working directory
730 # (we do this before stripping)
725 # (we do this before stripping)
731 newwd = self.state.get(self.originalwd, self.originalwd)
726 newwd = self.state.get(self.originalwd, self.originalwd)
732 if newwd < 0:
727 if newwd < 0:
733 # original directory is a parent of rebase set root or ignored
728 # original directory is a parent of rebase set root or ignored
734 newwd = self.originalwd
729 newwd = self.originalwd
735 if newwd not in [c.rev() for c in repo[None].parents()]:
730 if newwd not in [c.rev() for c in repo[None].parents()]:
736 ui.note(_(b"update back to initial working directory parent\n"))
731 ui.note(_(b"update back to initial working directory parent\n"))
737 hg.updaterepo(repo, newwd, overwrite=False)
732 hg.updaterepo(repo, newwd, overwrite=False)
738
733
739 collapsedas = None
734 collapsedas = None
740 if self.collapsef and not self.keepf:
735 if self.collapsef and not self.keepf:
741 collapsedas = newnode
736 collapsedas = newnode
742 clearrebased(
737 clearrebased(
743 ui,
738 ui,
744 repo,
739 repo,
745 self.destmap,
740 self.destmap,
746 self.state,
741 self.state,
747 self.skipped,
742 self.skipped,
748 collapsedas,
743 collapsedas,
749 self.keepf,
744 self.keepf,
750 fm=fm,
745 fm=fm,
751 backup=self.backupf,
746 backup=self.backupf,
752 )
747 )
753
748
754 clearstatus(repo)
749 clearstatus(repo)
755 clearcollapsemsg(repo)
750 clearcollapsemsg(repo)
756
751
757 ui.note(_(b"rebase completed\n"))
752 ui.note(_(b"rebase completed\n"))
758 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
753 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
759 if self.skipped:
754 if self.skipped:
760 skippedlen = len(self.skipped)
755 skippedlen = len(self.skipped)
761 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
756 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
762 fm.end()
757 fm.end()
763
758
764 if (
759 if (
765 self.activebookmark
760 self.activebookmark
766 and self.activebookmark in repo._bookmarks
761 and self.activebookmark in repo._bookmarks
767 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
762 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
768 ):
763 ):
769 bookmarks.activate(repo, self.activebookmark)
764 bookmarks.activate(repo, self.activebookmark)
770
765
771 def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
766 def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
772 '''Restore the repository to its original state.'''
767 '''Restore the repository to its original state.'''
773
768
774 repo = self.repo
769 repo = self.repo
775 try:
770 try:
776 # If the first commits in the rebased set get skipped during the
771 # If the first commits in the rebased set get skipped during the
777 # rebase, their values within the state mapping will be the dest
772 # rebase, their values within the state mapping will be the dest
778 # rev id. The rebased list must must not contain the dest rev
773 # rev id. The rebased list must must not contain the dest rev
779 # (issue4896)
774 # (issue4896)
780 rebased = [
775 rebased = [
781 s
776 s
782 for r, s in self.state.items()
777 for r, s in self.state.items()
783 if s >= 0 and s != r and s != self.destmap[r]
778 if s >= 0 and s != r and s != self.destmap[r]
784 ]
779 ]
785 immutable = [d for d in rebased if not repo[d].mutable()]
780 immutable = [d for d in rebased if not repo[d].mutable()]
786 cleanup = True
781 cleanup = True
787 if immutable:
782 if immutable:
788 repo.ui.warn(
783 repo.ui.warn(
789 _(b"warning: can't clean up public changesets %s\n")
784 _(b"warning: can't clean up public changesets %s\n")
790 % b', '.join(bytes(repo[r]) for r in immutable),
785 % b', '.join(bytes(repo[r]) for r in immutable),
791 hint=_(b"see 'hg help phases' for details"),
786 hint=_(b"see 'hg help phases' for details"),
792 )
787 )
793 cleanup = False
788 cleanup = False
794
789
795 descendants = set()
790 descendants = set()
796 if rebased:
791 if rebased:
797 descendants = set(repo.changelog.descendants(rebased))
792 descendants = set(repo.changelog.descendants(rebased))
798 if descendants - set(rebased):
793 if descendants - set(rebased):
799 repo.ui.warn(
794 repo.ui.warn(
800 _(
795 _(
801 b"warning: new changesets detected on "
796 b"warning: new changesets detected on "
802 b"destination branch, can't strip\n"
797 b"destination branch, can't strip\n"
803 )
798 )
804 )
799 )
805 cleanup = False
800 cleanup = False
806
801
807 if cleanup:
802 if cleanup:
808 if rebased:
803 if rebased:
809 strippoints = [
804 strippoints = [
810 c.node() for c in repo.set(b'roots(%ld)', rebased)
805 c.node() for c in repo.set(b'roots(%ld)', rebased)
811 ]
806 ]
812
807
813 updateifonnodes = set(rebased)
808 updateifonnodes = set(rebased)
814 updateifonnodes.update(self.destmap.values())
809 updateifonnodes.update(self.destmap.values())
815
810
816 if not dryrun and not confirm:
811 if not dryrun and not confirm:
817 updateifonnodes.add(self.originalwd)
812 updateifonnodes.add(self.originalwd)
818
813
819 shouldupdate = repo[b'.'].rev() in updateifonnodes
814 shouldupdate = repo[b'.'].rev() in updateifonnodes
820
815
821 # Update away from the rebase if necessary
816 # Update away from the rebase if necessary
822 if shouldupdate:
817 if shouldupdate:
823 mergemod.clean_update(repo[self.originalwd])
818 mergemod.clean_update(repo[self.originalwd])
824
819
825 # Strip from the first rebased revision
820 # Strip from the first rebased revision
826 if rebased:
821 if rebased:
827 repair.strip(repo.ui, repo, strippoints, backup=backup)
822 repair.strip(repo.ui, repo, strippoints, backup=backup)
828
823
829 if self.activebookmark and self.activebookmark in repo._bookmarks:
824 if self.activebookmark and self.activebookmark in repo._bookmarks:
830 bookmarks.activate(repo, self.activebookmark)
825 bookmarks.activate(repo, self.activebookmark)
831
826
832 finally:
827 finally:
833 clearstatus(repo)
828 clearstatus(repo)
834 clearcollapsemsg(repo)
829 clearcollapsemsg(repo)
835 if not suppwarns:
830 if not suppwarns:
836 repo.ui.warn(_(b'rebase aborted\n'))
831 repo.ui.warn(_(b'rebase aborted\n'))
837 return 0
832 return 0
838
833
839
834
840 @command(
835 @command(
841 b'rebase',
836 b'rebase',
842 [
837 [
843 (
838 (
844 b's',
839 b's',
845 b'source',
840 b'source',
846 [],
841 [],
847 _(b'rebase the specified changesets and their descendants'),
842 _(b'rebase the specified changesets and their descendants'),
848 _(b'REV'),
843 _(b'REV'),
849 ),
844 ),
850 (
845 (
851 b'b',
846 b'b',
852 b'base',
847 b'base',
853 [],
848 [],
854 _(b'rebase everything from branching point of specified changeset'),
849 _(b'rebase everything from branching point of specified changeset'),
855 _(b'REV'),
850 _(b'REV'),
856 ),
851 ),
857 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
852 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
858 (
853 (
859 b'd',
854 b'd',
860 b'dest',
855 b'dest',
861 b'',
856 b'',
862 _(b'rebase onto the specified changeset'),
857 _(b'rebase onto the specified changeset'),
863 _(b'REV'),
858 _(b'REV'),
864 ),
859 ),
865 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
860 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
866 (
861 (
867 b'm',
862 b'm',
868 b'message',
863 b'message',
869 b'',
864 b'',
870 _(b'use text as collapse commit message'),
865 _(b'use text as collapse commit message'),
871 _(b'TEXT'),
866 _(b'TEXT'),
872 ),
867 ),
873 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
868 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
874 (
869 (
875 b'l',
870 b'l',
876 b'logfile',
871 b'logfile',
877 b'',
872 b'',
878 _(b'read collapse commit message from file'),
873 _(b'read collapse commit message from file'),
879 _(b'FILE'),
874 _(b'FILE'),
880 ),
875 ),
881 (b'k', b'keep', False, _(b'keep original changesets')),
876 (b'k', b'keep', False, _(b'keep original changesets')),
882 (b'', b'keepbranches', False, _(b'keep original branch names')),
877 (b'', b'keepbranches', False, _(b'keep original branch names')),
883 (b'D', b'detach', False, _(b'(DEPRECATED)')),
878 (b'D', b'detach', False, _(b'(DEPRECATED)')),
884 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
879 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
885 (b't', b'tool', b'', _(b'specify merge tool')),
880 (b't', b'tool', b'', _(b'specify merge tool')),
886 (b'', b'stop', False, _(b'stop interrupted rebase')),
881 (b'', b'stop', False, _(b'stop interrupted rebase')),
887 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
882 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
888 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
883 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
889 (
884 (
890 b'',
885 b'',
891 b'auto-orphans',
886 b'auto-orphans',
892 b'',
887 b'',
893 _(
888 _(
894 b'automatically rebase orphan revisions '
889 b'automatically rebase orphan revisions '
895 b'in the specified revset (EXPERIMENTAL)'
890 b'in the specified revset (EXPERIMENTAL)'
896 ),
891 ),
897 ),
892 ),
898 ]
893 ]
899 + cmdutil.dryrunopts
894 + cmdutil.dryrunopts
900 + cmdutil.formatteropts
895 + cmdutil.formatteropts
901 + cmdutil.confirmopts,
896 + cmdutil.confirmopts,
902 _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
897 _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
903 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
898 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
904 )
899 )
905 def rebase(ui, repo, **opts):
900 def rebase(ui, repo, **opts):
906 """move changeset (and descendants) to a different branch
901 """move changeset (and descendants) to a different branch
907
902
908 Rebase uses repeated merging to graft changesets from one part of
903 Rebase uses repeated merging to graft changesets from one part of
909 history (the source) onto another (the destination). This can be
904 history (the source) onto another (the destination). This can be
910 useful for linearizing *local* changes relative to a master
905 useful for linearizing *local* changes relative to a master
911 development tree.
906 development tree.
912
907
913 Published commits cannot be rebased (see :hg:`help phases`).
908 Published commits cannot be rebased (see :hg:`help phases`).
914 To copy commits, see :hg:`help graft`.
909 To copy commits, see :hg:`help graft`.
915
910
916 If you don't specify a destination changeset (``-d/--dest``), rebase
911 If you don't specify a destination changeset (``-d/--dest``), rebase
917 will use the same logic as :hg:`merge` to pick a destination. if
912 will use the same logic as :hg:`merge` to pick a destination. if
918 the current branch contains exactly one other head, the other head
913 the current branch contains exactly one other head, the other head
919 is merged with by default. Otherwise, an explicit revision with
914 is merged with by default. Otherwise, an explicit revision with
920 which to merge with must be provided. (destination changeset is not
915 which to merge with must be provided. (destination changeset is not
921 modified by rebasing, but new changesets are added as its
916 modified by rebasing, but new changesets are added as its
922 descendants.)
917 descendants.)
923
918
924 Here are the ways to select changesets:
919 Here are the ways to select changesets:
925
920
926 1. Explicitly select them using ``--rev``.
921 1. Explicitly select them using ``--rev``.
927
922
928 2. Use ``--source`` to select a root changeset and include all of its
923 2. Use ``--source`` to select a root changeset and include all of its
929 descendants.
924 descendants.
930
925
931 3. Use ``--base`` to select a changeset; rebase will find ancestors
926 3. Use ``--base`` to select a changeset; rebase will find ancestors
932 and their descendants which are not also ancestors of the destination.
927 and their descendants which are not also ancestors of the destination.
933
928
934 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
929 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
935 rebase will use ``--base .`` as above.
930 rebase will use ``--base .`` as above.
936
931
937 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
932 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
938 can be used in ``--dest``. Destination would be calculated per source
933 can be used in ``--dest``. Destination would be calculated per source
939 revision with ``SRC`` substituted by that single source revision and
934 revision with ``SRC`` substituted by that single source revision and
940 ``ALLSRC`` substituted by all source revisions.
935 ``ALLSRC`` substituted by all source revisions.
941
936
942 Rebase will destroy original changesets unless you use ``--keep``.
937 Rebase will destroy original changesets unless you use ``--keep``.
943 It will also move your bookmarks (even if you do).
938 It will also move your bookmarks (even if you do).
944
939
945 Some changesets may be dropped if they do not contribute changes
940 Some changesets may be dropped if they do not contribute changes
946 (e.g. merges from the destination branch).
941 (e.g. merges from the destination branch).
947
942
948 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
943 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
949 a named branch with two heads. You will need to explicitly specify source
944 a named branch with two heads. You will need to explicitly specify source
950 and/or destination.
945 and/or destination.
951
946
952 If you need to use a tool to automate merge/conflict decisions, you
947 If you need to use a tool to automate merge/conflict decisions, you
953 can specify one with ``--tool``, see :hg:`help merge-tools`.
948 can specify one with ``--tool``, see :hg:`help merge-tools`.
954 As a caveat: the tool will not be used to mediate when a file was
949 As a caveat: the tool will not be used to mediate when a file was
955 deleted, there is no hook presently available for this.
950 deleted, there is no hook presently available for this.
956
951
957 If a rebase is interrupted to manually resolve a conflict, it can be
952 If a rebase is interrupted to manually resolve a conflict, it can be
958 continued with --continue/-c, aborted with --abort/-a, or stopped with
953 continued with --continue/-c, aborted with --abort/-a, or stopped with
959 --stop.
954 --stop.
960
955
961 .. container:: verbose
956 .. container:: verbose
962
957
963 Examples:
958 Examples:
964
959
965 - move "local changes" (current commit back to branching point)
960 - move "local changes" (current commit back to branching point)
966 to the current branch tip after a pull::
961 to the current branch tip after a pull::
967
962
968 hg rebase
963 hg rebase
969
964
970 - move a single changeset to the stable branch::
965 - move a single changeset to the stable branch::
971
966
972 hg rebase -r 5f493448 -d stable
967 hg rebase -r 5f493448 -d stable
973
968
974 - splice a commit and all its descendants onto another part of history::
969 - splice a commit and all its descendants onto another part of history::
975
970
976 hg rebase --source c0c3 --dest 4cf9
971 hg rebase --source c0c3 --dest 4cf9
977
972
978 - rebase everything on a branch marked by a bookmark onto the
973 - rebase everything on a branch marked by a bookmark onto the
979 default branch::
974 default branch::
980
975
981 hg rebase --base myfeature --dest default
976 hg rebase --base myfeature --dest default
982
977
983 - collapse a sequence of changes into a single commit::
978 - collapse a sequence of changes into a single commit::
984
979
985 hg rebase --collapse -r 1520:1525 -d .
980 hg rebase --collapse -r 1520:1525 -d .
986
981
987 - move a named branch while preserving its name::
982 - move a named branch while preserving its name::
988
983
989 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
984 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
990
985
991 - stabilize orphaned changesets so history looks linear::
986 - stabilize orphaned changesets so history looks linear::
992
987
993 hg rebase -r 'orphan()-obsolete()'\
988 hg rebase -r 'orphan()-obsolete()'\
994 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
989 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
995 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
990 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
996
991
997 Configuration Options:
992 Configuration Options:
998
993
999 You can make rebase require a destination if you set the following config
994 You can make rebase require a destination if you set the following config
1000 option::
995 option::
1001
996
1002 [commands]
997 [commands]
1003 rebase.requiredest = True
998 rebase.requiredest = True
1004
999
1005 By default, rebase will close the transaction after each commit. For
1000 By default, rebase will close the transaction after each commit. For
1006 performance purposes, you can configure rebase to use a single transaction
1001 performance purposes, you can configure rebase to use a single transaction
1007 across the entire rebase. WARNING: This setting introduces a significant
1002 across the entire rebase. WARNING: This setting introduces a significant
1008 risk of losing the work you've done in a rebase if the rebase aborts
1003 risk of losing the work you've done in a rebase if the rebase aborts
1009 unexpectedly::
1004 unexpectedly::
1010
1005
1011 [rebase]
1006 [rebase]
1012 singletransaction = True
1007 singletransaction = True
1013
1008
1014 By default, rebase writes to the working copy, but you can configure it to
1009 By default, rebase writes to the working copy, but you can configure it to
1015 run in-memory for better performance. When the rebase is not moving the
1010 run in-memory for better performance. When the rebase is not moving the
1016 parent(s) of the working copy (AKA the "currently checked out changesets"),
1011 parent(s) of the working copy (AKA the "currently checked out changesets"),
1017 this may also allow it to run even if the working copy is dirty::
1012 this may also allow it to run even if the working copy is dirty::
1018
1013
1019 [rebase]
1014 [rebase]
1020 experimental.inmemory = True
1015 experimental.inmemory = True
1021
1016
1022 Return Values:
1017 Return Values:
1023
1018
1024 Returns 0 on success, 1 if nothing to rebase or there are
1019 Returns 0 on success, 1 if nothing to rebase or there are
1025 unresolved conflicts.
1020 unresolved conflicts.
1026
1021
1027 """
1022 """
1028 opts = pycompat.byteskwargs(opts)
1023 opts = pycompat.byteskwargs(opts)
1029 inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
1024 inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
1030 action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
1025 action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
1031 if action:
1026 if action:
1032 cmdutil.check_incompatible_arguments(
1027 cmdutil.check_incompatible_arguments(
1033 opts, action, [b'confirm', b'dry_run']
1028 opts, action, [b'confirm', b'dry_run']
1034 )
1029 )
1035 cmdutil.check_incompatible_arguments(
1030 cmdutil.check_incompatible_arguments(
1036 opts, action, [b'rev', b'source', b'base', b'dest']
1031 opts, action, [b'rev', b'source', b'base', b'dest']
1037 )
1032 )
1038 cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
1033 cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
1039 cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
1034 cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')
1040
1035
1041 if action or repo.currenttransaction() is not None:
1036 if action or repo.currenttransaction() is not None:
1042 # in-memory rebase is not compatible with resuming rebases.
1037 # in-memory rebase is not compatible with resuming rebases.
1043 # (Or if it is run within a transaction, since the restart logic can
1038 # (Or if it is run within a transaction, since the restart logic can
1044 # fail the entire transaction.)
1039 # fail the entire transaction.)
1045 inmemory = False
1040 inmemory = False
1046
1041
1047 if opts.get(b'auto_orphans'):
1042 if opts.get(b'auto_orphans'):
1048 disallowed_opts = set(opts) - {b'auto_orphans'}
1043 disallowed_opts = set(opts) - {b'auto_orphans'}
1049 cmdutil.check_incompatible_arguments(
1044 cmdutil.check_incompatible_arguments(
1050 opts, b'auto_orphans', disallowed_opts
1045 opts, b'auto_orphans', disallowed_opts
1051 )
1046 )
1052
1047
1053 userrevs = list(repo.revs(opts.get(b'auto_orphans')))
1048 userrevs = list(repo.revs(opts.get(b'auto_orphans')))
1054 opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
1049 opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
1055 opts[b'dest'] = b'_destautoorphanrebase(SRC)'
1050 opts[b'dest'] = b'_destautoorphanrebase(SRC)'
1056
1051
1057 if opts.get(b'dry_run') or opts.get(b'confirm'):
1052 if opts.get(b'dry_run') or opts.get(b'confirm'):
1058 return _dryrunrebase(ui, repo, action, opts)
1053 return _dryrunrebase(ui, repo, action, opts)
1059 elif action == b'stop':
1054 elif action == b'stop':
1060 rbsrt = rebaseruntime(repo, ui)
1055 rbsrt = rebaseruntime(repo, ui)
1061 with repo.wlock(), repo.lock():
1056 with repo.wlock(), repo.lock():
1062 rbsrt.restorestatus()
1057 rbsrt.restorestatus()
1063 if rbsrt.collapsef:
1058 if rbsrt.collapsef:
1064 raise error.Abort(_(b"cannot stop in --collapse session"))
1059 raise error.Abort(_(b"cannot stop in --collapse session"))
1065 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
1060 allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
1066 if not (rbsrt.keepf or allowunstable):
1061 if not (rbsrt.keepf or allowunstable):
1067 raise error.Abort(
1062 raise error.Abort(
1068 _(
1063 _(
1069 b"cannot remove original changesets with"
1064 b"cannot remove original changesets with"
1070 b" unrebased descendants"
1065 b" unrebased descendants"
1071 ),
1066 ),
1072 hint=_(
1067 hint=_(
1073 b'either enable obsmarkers to allow unstable '
1068 b'either enable obsmarkers to allow unstable '
1074 b'revisions or use --keep to keep original '
1069 b'revisions or use --keep to keep original '
1075 b'changesets'
1070 b'changesets'
1076 ),
1071 ),
1077 )
1072 )
1078 # update to the current working revision
1073 # update to the current working revision
1079 # to clear interrupted merge
1074 # to clear interrupted merge
1080 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
1075 hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
1081 rbsrt._finishrebase()
1076 rbsrt._finishrebase()
1082 return 0
1077 return 0
1083 elif inmemory:
1078 elif inmemory:
1084 try:
1079 try:
1085 # in-memory merge doesn't support conflicts, so if we hit any, abort
1080 # in-memory merge doesn't support conflicts, so if we hit any, abort
1086 # and re-run as an on-disk merge.
1081 # and re-run as an on-disk merge.
1087 overrides = {(b'rebase', b'singletransaction'): True}
1082 overrides = {(b'rebase', b'singletransaction'): True}
1088 with ui.configoverride(overrides, b'rebase'):
1083 with ui.configoverride(overrides, b'rebase'):
1089 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
1084 return _dorebase(ui, repo, action, opts, inmemory=inmemory)
1090 except error.InMemoryMergeConflictsError:
1085 except error.InMemoryMergeConflictsError:
1091 ui.warn(
1086 ui.warn(
1092 _(
1087 _(
1093 b'hit merge conflicts; re-running rebase without in-memory'
1088 b'hit merge conflicts; re-running rebase without in-memory'
1094 b' merge\n'
1089 b' merge\n'
1095 )
1090 )
1096 )
1091 )
1097 # TODO: Make in-memory merge not use the on-disk merge state, so
1092 # TODO: Make in-memory merge not use the on-disk merge state, so
1098 # we don't have to clean it here
1093 # we don't have to clean it here
1099 mergestatemod.mergestate.clean(repo)
1094 mergestatemod.mergestate.clean(repo)
1100 clearstatus(repo)
1095 clearstatus(repo)
1101 clearcollapsemsg(repo)
1096 clearcollapsemsg(repo)
1102 return _dorebase(ui, repo, action, opts, inmemory=False)
1097 return _dorebase(ui, repo, action, opts, inmemory=False)
1103 else:
1098 else:
1104 return _dorebase(ui, repo, action, opts)
1099 return _dorebase(ui, repo, action, opts)
1105
1100
1106
1101
def _dryrunrebase(ui, repo, action, opts):
    """Run an in-memory trial rebase without changing the repository.

    With ``--confirm`` the user is prompted afterwards whether to keep the
    result; with ``--dry-run`` the trial is always rolled back.  Returns 0
    on a clean trial and 1 if a merge conflict was hit.
    """
    # The trial always runs in memory, so the working copy is untouched.
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        # Unless we finish or deliberately abort below, the finally block
        # rolls the trial rebase back.
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                # leaveunfinished=True keeps the rebase state around so we
                # can either finish it (--confirm answered yes) or abort it.
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            # NOTE(review): on Abort the rebase machinery appears to have
            # cleaned up already, so no extra abort pass is needed here.
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True,
                        backup=False,
                        suppwarns=True,
                        confirm=confirm,
                    )
                needsabort = False
            else:
                # Plain dry-run: leave needsabort True so the finally block
                # undoes the trial after we report success.
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True,
                    backup=False,
                    suppwarns=True,
                    dryrun=opts.get(b'dry_run'),
                )
1167
1162
1168
1163
def _dorebase(ui, repo, action, opts, inmemory=False):
    """Run a real (non-dry-run) rebase with a freshly built runtime state."""
    runtime = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, runtime, inmemory=inmemory)
1172
1167
1173
1168
def _origrebase(
    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
):
    """Perform the rebase described by ``opts`` using runtime state ``rbsrt``.

    ``action`` is ``b'continue'``/``b'abort'`` when resuming an interrupted
    rebase, or a false value when starting a new one (``b'stop'`` is handled
    by the caller, hence the assert).  With ``leaveunfinished`` the on-disk
    rebase state is kept so the caller can decide whether to finish or abort
    it (used by the dry-run/confirm path).
    """
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        if opts.get(b'interactive'):
            # -i is not supported by rebase; point the user at histedit,
            # including the config flag needed if it is not enabled.
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            # Resuming an interrupted rebase (--continue/--abort).
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                # Refuse to continue while merge conflicts remain unresolved.
                ms = mergestatemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', []),
                opts.get(b'base', []),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()
1252
1247
1253
1248
def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
    """use revisions argument to define destmap {srcrev: destrev}

    ``destf``/``srcf``/``basef``/``revf`` carry the raw --dest/--source/
    --base/--rev command line values.  Returns None (after printing an
    explanatory message) when there is nothing to rebase.
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    # Precedence among the selection flags: --rev wins, then --source,
    # otherwise fall back to --base (defaulting to the working directory
    # parent).
    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, srcf)
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        # `+ (%ld)` to work around `wdir()::` being empty
        rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
    else:
        base = scmutil.revrange(repo, basef or [b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    if nodemod.wdirrev in rebaseset:
        raise error.Abort(_(b'cannot rebase the working copy'))
    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap
1413
1408
1414
1409
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    lowest = min(state)
    # Collect every parent of a rebased rev (other than the first/root one)
    # that lives outside both the rebased set and the destination ancestry.
    external = {
        p.rev()
        for rev in state
        if rev != lowest
        for p in repo[rev].parents()
        if p.rev() not in state and p.rev() not in destancestors
    }
    if not external:
        return nullrev
    if len(external) == 1:
        return external.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(external)))
    )
1439
1434
1440
1435
def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = extra.get(b'branch')

    ctx = wctx.tomemctx(
        commitmsg,
        date=date,
        user=user,
        extra=extra,
        branch=branch,
        editor=editor,
    )
    allowempty = repo.ui.configbool(b'ui', b'allowemptycommit')
    if ctx.isempty() and not allowempty:
        return None
    node = repo.commitctx(ctx)
    # the in-memory working context might be reused; reset it now
    wctx.clean()
    return node
1463
1458
1464
1459
def commitnode(repo, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    # Outside of a single big transaction, give the dirstate its own guard
    # so an interrupted commit rolls the working copy bookkeeping back.
    if repo.ui.configbool(b'rebase', b'singletransaction'):
        dsguard = util.nullcontextmanager()
    else:
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        # Commit might fail if unresolved files exist
        node = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )
        repo.dirstate.setbranch(repo[node].branch())
        return node
1479
1474
1480
1475
def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor

    Returns the stats object from ``mergemod.update()``.  ``wctx`` may be an
    in-memory (overlay) context or the on-disk working context; in the latter
    case the working directory is updated to ``p1`` first.
    """
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.clean_update(p1ctx)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    # Record the rebased revision's parents (p2 is nullrev unless rev is a
    # merge whose second parent is kept).
    wctx.setparents(p1ctx.node(), repo[p2].node())
    if collapse:
        copies.graftcopies(wctx, ctx, repo[dest])
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())
    return stats
1524
1519
1525
1520
def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

         H       C1 G1
        /|       | /
       F G       |/
      K | |  ->  K
      | C D      |
      | |/       |
      | B        | ...
      |/         |/
      A          A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

          C'   After rebasing C, when considering B's destination, use C'
          |    instead of the original C.
      B   D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            # the nearest already-rebased ancestor of this parent, if any,
            # becomes the adjusted destination for this side of the merge
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            # dest itself is being rebased: follow it to its new location
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result
1602
1597
1603
1598
1604 def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
1599 def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
1605 """
1600 """
1606 Abort if rebase will create divergence or rebase is noop because of markers
1601 Abort if rebase will create divergence or rebase is noop because of markers
1607
1602
1608 `rebaseobsrevs`: set of obsolete revision in source
1603 `rebaseobsrevs`: set of obsolete revision in source
1609 `rebaseobsskipped`: set of revisions from source skipped because they have
1604 `rebaseobsskipped`: set of revisions from source skipped because they have
1610 successors in destination or no non-obsolete successor.
1605 successors in destination or no non-obsolete successor.
1611 """
1606 """
1612 # Obsolete node with successors not in dest leads to divergence
1607 # Obsolete node with successors not in dest leads to divergence
1613 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1608 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1614 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1609 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1615
1610
1616 if divergencebasecandidates and not divergenceok:
1611 if divergencebasecandidates and not divergenceok:
1617 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1612 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1618 msg = _(b"this rebase will cause divergences from: %s")
1613 msg = _(b"this rebase will cause divergences from: %s")
1619 h = _(
1614 h = _(
1620 b"to force the rebase please set "
1615 b"to force the rebase please set "
1621 b"experimental.evolution.allowdivergence=True"
1616 b"experimental.evolution.allowdivergence=True"
1622 )
1617 )
1623 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1618 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1624
1619
1625
1620
def successorrevs(unfi, rev):
    """Yield revision numbers for successors of rev."""
    # Successors may only exist as filtered (hidden) changesets, so the
    # lookup must run on the unfiltered repository.
    assert unfi.filtername is None
    to_rev = unfi.changelog.index.get_rev
    node = unfi[rev].node()
    for succ in obsutil.allsuccessors(unfi.obsstore, [node]):
        succrev = to_rev(succ)
        # get_rev returns None for nodes unknown to this repo; skip those.
        if succrev is not None:
            yield succrev
1634
1629
1635
1630
def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    The destination specified by "dest" cannot always be used directly because
    previously rebase result could affect destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge is slightly more complicated. See the comment
    block below.

    Returns a ``(newp1, newp2, base)`` tuple of revision numbers, where
    ``base`` is the merge base to use when rebasing ``rev`` onto ``newp1``.
    ``destmap`` maps source rev -> destination rev; ``state`` maps source
    rev -> rebased rev (positive once rebased).
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev)  # old parents
    newps = [nullrev, nullrev]  # new parents
    # per-parent destination, adjusted for already-rebased revisions
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps)  # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendent of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries to be not rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p  # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x):  # CASE-1
                    np = nullrev
                elif isancestor(x, np):  # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendent of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(
                _(
                    b'cannot rebase %d:%s without '
                    b'moving at least one of its parents'
                )
                % (rev, repo[rev])
            )

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_(b'source is ancestor of destination'))

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev or base in newps:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            other_bases = set(bases) - {base}
            siderevs = list(
                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        if any(revs is not None for revs in unwanted):
            # Choose a merge base that has a minimal number of unwanted revs.
            l, i = min(
                (len(revs), i)
                for i, revs in enumerate(unwanted)
                if revs is not None
            )

            # The merge will include unwanted revisions. Abort now. Revisit this if
            # we have a more advanced merge algorithm that handles multiple bases.
            if l > 0:
                unwanteddesc = _(b' or ').join(
                    (
                        b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                        for revs in unwanted
                        if revs is not None
                    )
                )
                raise error.Abort(
                    _(b'rebasing %d:%s will include unwanted changes from %s')
                    % (rev, repo[rev], unwanteddesc)
                )

            # newps[0] should match merge base if possible. Currently, if newps[i]
            # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
            # the other's ancestor. In that case, it's fine to not swap newps here.
            # (see CASE-1 and CASE-2 above)
            if i != 0:
                if newps[i] != nullrev:
                    newps[0], newps[i] = newps[i], newps[0]
                bases[0], bases[i] = bases[i], bases[0]

    # "rebasenode" updates to new p1, use the corresponding merge base.
    base = bases[0]

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
1820
1815
1821
1816
def isagitpatch(repo, patchname):
    """Return True if the named mq patch is in git extended diff format."""
    patchpath = os.path.join(repo.mq.path, patchname)
    reader = patch.linereader(open(patchpath, b'rb'))
    # A git-format patch contains at least one "diff --git" header line.
    return any(line.startswith(b'diff --git') for line in reader)
1829
1824
1830
1825
def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them"""
    mq = repo.mq
    series_before = mq.fullseries[:]
    rebasedpatches = {}  # rev -> (patch name, patch is git-format)
    droppedpatches = set()  # patch names to drop from the series

    for applied in mq.applied:
        rev = repo[applied.node].rev()
        if rev not in state:
            # Applied but not rebased, not sure this should happen
            droppedpatches.add(applied.name)
            continue
        repo.ui.debug(
            b'revision %d is an mq patch (%s), finalize it.\n'
            % (rev, applied.name)
        )
        rebasedpatches[rev] = (applied.name, isagitpatch(repo, applied.name))

    if not rebasedpatches:
        return

    mq.finish(repo, rebasedpatches.keys())

    # We must start import from the newest revision
    for rev in sorted(rebasedpatches, reverse=True):
        name, isgit = rebasedpatches[rev]
        if rev in skipped:
            # Rebased and skipped
            droppedpatches.add(name)
            continue
        repo.ui.note(
            _(b'updating mq patch %s to %d:%s\n')
            % (name, state[rev], repo[state[rev]])
        )
        mq.qimport(
            repo,
            (),
            patchname=name,
            git=isgit,
            rev=[b"%d" % state[rev]],
        )

    # Patches were either applied and rebased and imported in
    # order, applied and removed or unapplied. Discard the removed
    # ones while preserving the original series order and guards.
    mq.fullseries[:] = [
        entry
        for entry in series_before
        if mq.guard_re.split(entry, 1)[0] not in droppedpatches
    ]
    mq.seriesdirty = True
    mq.savedirty()
1883
1878
1884
1879
def storecollapsemsg(repo, collapsemsg):
    """Store the collapse message to allow recovery"""
    # An empty/None message is persisted as a lone newline so recovery
    # always finds the file.
    fp = repo.vfs(b"last-message.txt", b"w")
    fp.write(b"%s\n" % (collapsemsg or b''))
    fp.close()
1891
1886
1892
1887
def clearcollapsemsg(repo):
    """Remove collapse message file"""
    # The file may legitimately be absent (no collapse in progress), so a
    # missing path is not an error.
    repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1896
1891
1897
1892
def restorecollapsemsg(repo, isabort):
    """Restore previously stored collapse message"""
    try:
        fp = repo.vfs(b"last-message.txt")
        msg = fp.readline().strip()
        fp.close()
        return msg
    except IOError as err:
        # Only a missing file is tolerated; propagate any other I/O failure.
        if err.errno != errno.ENOENT:
            raise
        if not isabort:
            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
        # Oh well, just abort like normal
        return b''
1913
1908
1914
1909
def clearstatus(repo):
    """Remove the rebase status files"""
    # Make sure the active transaction won't write the state file
    transaction = repo.currenttransaction()
    if transaction:
        transaction.removefilegenerator(b'rebasestate')
    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1922
1917
1923
1918
def sortsource(destmap):
    """yield source revisions in an order that we only rebase things once

    If source and destination overlaps, we should filter out revisions
    depending on other revisions which hasn't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A to B, B to C. This function yields [B], then
    [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    remaining = set(destmap)
    while remaining:
        # A revision is ready once its destination no longer waits to be
        # rebased itself.
        batch = sorted(r for r in remaining if destmap[r] not in remaining)
        if not batch:
            # every remaining rev depends on another remaining rev
            raise error.Abort(_(b'source and destination form a cycle'))
        remaining.difference_update(batch)
        yield batch
1948
1943
1949
1944
def buildstate(repo, destmap, collapse):
    '''Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
    collapse: whether the rebase will collapse the rebased revisions

    Returns None when there is nothing to rebase, otherwise a tuple
    (originalwd, destmap, state) where state maps srcrev -> revtodo (or to
    the rev itself for revisions that are already in place).
    '''
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_(b'no matching revisions'))

    # Only check the first batch of revisions to rebase not depending on other
    # rebaseset. This means "source is ancestor of destination" for the second
    # (and following) batches of revisions are not checked here. We rely on
    # "defineparents" to do that check.
    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_(b'no matching revisions'))

    def revof(r):
        return r.rev()

    roots = sorted(roots, key=revof)
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = len(sortedsrc) == 1
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_(b'source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revisions
                state[root.rev()] = root.rev()
                repo.ui.debug(b'source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state
2015
2010
2016
2011
2017 def clearrebased(
2012 def clearrebased(
2018 ui,
2013 ui,
2019 repo,
2014 repo,
2020 destmap,
2015 destmap,
2021 state,
2016 state,
2022 skipped,
2017 skipped,
2023 collapsedas=None,
2018 collapsedas=None,
2024 keepf=False,
2019 keepf=False,
2025 fm=None,
2020 fm=None,
2026 backup=True,
2021 backup=True,
2027 ):
2022 ):
2028 """dispose of rebased revision at the end of the rebase
2023 """dispose of rebased revision at the end of the rebase
2029
2024
2030 If `collapsedas` is not None, the rebase was a collapse whose result is the
2025 If `collapsedas` is not None, the rebase was a collapse whose result is the
2031 `collapsedas` node.
2026 `collapsedas` node.
2032
2027
2033 If `keepf` is True, the rebase has --keep set and no nodes should be
2028 If `keepf` is True, the rebase has --keep set and no nodes should be
2034 removed (but bookmarks still need to be moved).
2029 removed (but bookmarks still need to be moved).
2035
2030
2036 If `backup` is False, no backup will be stored when stripping rebased
2031 If `backup` is False, no backup will be stored when stripping rebased
2037 revisions.
2032 revisions.
2038 """
2033 """
2039 tonode = repo.changelog.node
2034 tonode = repo.changelog.node
2040 replacements = {}
2035 replacements = {}
2041 moves = {}
2036 moves = {}
2042 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2037 stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)
2043
2038
2044 collapsednodes = []
2039 collapsednodes = []
2045 for rev, newrev in sorted(state.items()):
2040 for rev, newrev in sorted(state.items()):
2046 if newrev >= 0 and newrev != rev:
2041 if newrev >= 0 and newrev != rev:
2047 oldnode = tonode(rev)
2042 oldnode = tonode(rev)
2048 newnode = collapsedas or tonode(newrev)
2043 newnode = collapsedas or tonode(newrev)
2049 moves[oldnode] = newnode
2044 moves[oldnode] = newnode
2050 succs = None
2045 succs = None
2051 if rev in skipped:
2046 if rev in skipped:
2052 if stripcleanup or not repo[rev].obsolete():
2047 if stripcleanup or not repo[rev].obsolete():
2053 succs = ()
2048 succs = ()
2054 elif collapsedas:
2049 elif collapsedas:
2055 collapsednodes.append(oldnode)
2050 collapsednodes.append(oldnode)
2056 else:
2051 else:
2057 succs = (newnode,)
2052 succs = (newnode,)
2058 if succs is not None:
2053 if succs is not None:
2059 replacements[(oldnode,)] = succs
2054 replacements[(oldnode,)] = succs
2060 if collapsednodes:
2055 if collapsednodes:
2061 replacements[tuple(collapsednodes)] = (collapsedas,)
2056 replacements[tuple(collapsednodes)] = (collapsedas,)
2062 if fm:
2057 if fm:
2063 hf = fm.hexfunc
2058 hf = fm.hexfunc
2064 fl = fm.formatlist
2059 fl = fm.formatlist
2065 fd = fm.formatdict
2060 fd = fm.formatdict
2066 changes = {}
2061 changes = {}
2067 for oldns, newn in pycompat.iteritems(replacements):
2062 for oldns, newn in pycompat.iteritems(replacements):
2068 for oldn in oldns:
2063 for oldn in oldns:
2069 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2064 changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
2070 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2065 nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
2071 fm.data(nodechanges=nodechanges)
2066 fm.data(nodechanges=nodechanges)
2072 if keepf:
2067 if keepf:
2073 replacements = {}
2068 replacements = {}
2074 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2069 scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2075
2070
2076
2071
2077 def pullrebase(orig, ui, repo, *args, **opts):
2072 def pullrebase(orig, ui, repo, *args, **opts):
2078 """Call rebase after pull if the latter has been invoked with --rebase"""
2073 """Call rebase after pull if the latter has been invoked with --rebase"""
2079 if opts.get('rebase'):
2074 if opts.get('rebase'):
2080 if ui.configbool(b'commands', b'rebase.requiredest'):
2075 if ui.configbool(b'commands', b'rebase.requiredest'):
2081 msg = _(b'rebase destination required by configuration')
2076 msg = _(b'rebase destination required by configuration')
2082 hint = _(b'use hg pull followed by hg rebase -d DEST')
2077 hint = _(b'use hg pull followed by hg rebase -d DEST')
2083 raise error.Abort(msg, hint=hint)
2078 raise error.Abort(msg, hint=hint)
2084
2079
2085 with repo.wlock(), repo.lock():
2080 with repo.wlock(), repo.lock():
2086 if opts.get('update'):
2081 if opts.get('update'):
2087 del opts['update']
2082 del opts['update']
2088 ui.debug(
2083 ui.debug(
2089 b'--update and --rebase are not compatible, ignoring '
2084 b'--update and --rebase are not compatible, ignoring '
2090 b'the update flag\n'
2085 b'the update flag\n'
2091 )
2086 )
2092
2087
2093 cmdutil.checkunfinished(repo, skipmerge=True)
2088 cmdutil.checkunfinished(repo, skipmerge=True)
2094 cmdutil.bailifchanged(
2089 cmdutil.bailifchanged(
2095 repo,
2090 repo,
2096 hint=_(
2091 hint=_(
2097 b'cannot pull with rebase: '
2092 b'cannot pull with rebase: '
2098 b'please commit or shelve your changes first'
2093 b'please commit or shelve your changes first'
2099 ),
2094 ),
2100 )
2095 )
2101
2096
2102 revsprepull = len(repo)
2097 revsprepull = len(repo)
2103 origpostincoming = commands.postincoming
2098 origpostincoming = commands.postincoming
2104
2099
2105 def _dummy(*args, **kwargs):
2100 def _dummy(*args, **kwargs):
2106 pass
2101 pass
2107
2102
2108 commands.postincoming = _dummy
2103 commands.postincoming = _dummy
2109 try:
2104 try:
2110 ret = orig(ui, repo, *args, **opts)
2105 ret = orig(ui, repo, *args, **opts)
2111 finally:
2106 finally:
2112 commands.postincoming = origpostincoming
2107 commands.postincoming = origpostincoming
2113 revspostpull = len(repo)
2108 revspostpull = len(repo)
2114 if revspostpull > revsprepull:
2109 if revspostpull > revsprepull:
2115 # --rev option from pull conflict with rebase own --rev
2110 # --rev option from pull conflict with rebase own --rev
2116 # dropping it
2111 # dropping it
2117 if 'rev' in opts:
2112 if 'rev' in opts:
2118 del opts['rev']
2113 del opts['rev']
2119 # positional argument from pull conflicts with rebase's own
2114 # positional argument from pull conflicts with rebase's own
2120 # --source.
2115 # --source.
2121 if 'source' in opts:
2116 if 'source' in opts:
2122 del opts['source']
2117 del opts['source']
2123 # revsprepull is the len of the repo, not revnum of tip.
2118 # revsprepull is the len of the repo, not revnum of tip.
2124 destspace = list(repo.changelog.revs(start=revsprepull))
2119 destspace = list(repo.changelog.revs(start=revsprepull))
2125 opts['_destspace'] = destspace
2120 opts['_destspace'] = destspace
2126 try:
2121 try:
2127 rebase(ui, repo, **opts)
2122 rebase(ui, repo, **opts)
2128 except error.NoMergeDestAbort:
2123 except error.NoMergeDestAbort:
2129 # we can maybe update instead
2124 # we can maybe update instead
2130 rev, _a, _b = destutil.destupdate(repo)
2125 rev, _a, _b = destutil.destupdate(repo)
2131 if rev == repo[b'.'].rev():
2126 if rev == repo[b'.'].rev():
2132 ui.status(_(b'nothing to rebase\n'))
2127 ui.status(_(b'nothing to rebase\n'))
2133 else:
2128 else:
2134 ui.status(_(b'nothing to rebase - updating instead\n'))
2129 ui.status(_(b'nothing to rebase - updating instead\n'))
2135 # not passing argument to get the bare update behavior
2130 # not passing argument to get the bare update behavior
2136 # with warning and trumpets
2131 # with warning and trumpets
2137 commands.update(ui, repo)
2132 commands.update(ui, repo)
2138 else:
2133 else:
2139 if opts.get('tool'):
2134 if opts.get('tool'):
2140 raise error.Abort(_(b'--tool can only be used with --rebase'))
2135 raise error.Abort(_(b'--tool can only be used with --rebase'))
2141 ret = orig(ui, repo, *args, **opts)
2136 ret = orig(ui, repo, *args, **opts)
2142
2137
2143 return ret
2138 return ret
2144
2139
2145
2140
2146 def _filterobsoleterevs(repo, revs):
2141 def _filterobsoleterevs(repo, revs):
2147 """returns a set of the obsolete revisions in revs"""
2142 """returns a set of the obsolete revisions in revs"""
2148 return {r for r in revs if repo[r].obsolete()}
2143 return {r for r in revs if repo[r].obsolete()}
2149
2144
2150
2145
2151 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2146 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
2152 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2147 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
2153
2148
2154 `obsoletenotrebased` is a mapping from obsolete => successor for all
2149 `obsoletenotrebased` is a mapping from obsolete => successor for all
2155 obsolete nodes to be rebased given in `rebaseobsrevs`.
2150 obsolete nodes to be rebased given in `rebaseobsrevs`.
2156
2151
2157 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
2152 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
2158 without a successor in destination.
2153 without a successor in destination.
2159
2154
2160 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2155 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
2161 obsolete successors.
2156 obsolete successors.
2162 """
2157 """
2163 obsoletenotrebased = {}
2158 obsoletenotrebased = {}
2164 obsoletewithoutsuccessorindestination = set()
2159 obsoletewithoutsuccessorindestination = set()
2165 obsoleteextinctsuccessors = set()
2160 obsoleteextinctsuccessors = set()
2166
2161
2167 assert repo.filtername is None
2162 assert repo.filtername is None
2168 cl = repo.changelog
2163 cl = repo.changelog
2169 get_rev = cl.index.get_rev
2164 get_rev = cl.index.get_rev
2170 extinctrevs = set(repo.revs(b'extinct()'))
2165 extinctrevs = set(repo.revs(b'extinct()'))
2171 for srcrev in rebaseobsrevs:
2166 for srcrev in rebaseobsrevs:
2172 srcnode = cl.node(srcrev)
2167 srcnode = cl.node(srcrev)
2173 # XXX: more advanced APIs are required to handle split correctly
2168 # XXX: more advanced APIs are required to handle split correctly
2174 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2169 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
2175 # obsutil.allsuccessors includes node itself
2170 # obsutil.allsuccessors includes node itself
2176 successors.remove(srcnode)
2171 successors.remove(srcnode)
2177 succrevs = {get_rev(s) for s in successors}
2172 succrevs = {get_rev(s) for s in successors}
2178 succrevs.discard(None)
2173 succrevs.discard(None)
2179 if succrevs.issubset(extinctrevs):
2174 if succrevs.issubset(extinctrevs):
2180 # all successors are extinct
2175 # all successors are extinct
2181 obsoleteextinctsuccessors.add(srcrev)
2176 obsoleteextinctsuccessors.add(srcrev)
2182 if not successors:
2177 if not successors:
2183 # no successor
2178 # no successor
2184 obsoletenotrebased[srcrev] = None
2179 obsoletenotrebased[srcrev] = None
2185 else:
2180 else:
2186 dstrev = destmap[srcrev]
2181 dstrev = destmap[srcrev]
2187 for succrev in succrevs:
2182 for succrev in succrevs:
2188 if cl.isancestorrev(succrev, dstrev):
2183 if cl.isancestorrev(succrev, dstrev):
2189 obsoletenotrebased[srcrev] = succrev
2184 obsoletenotrebased[srcrev] = succrev
2190 break
2185 break
2191 else:
2186 else:
2192 # If 'srcrev' has a successor in rebase set but none in
2187 # If 'srcrev' has a successor in rebase set but none in
2193 # destination (which would be caught above), we shall skip it
2188 # destination (which would be caught above), we shall skip it
2194 # and its descendants to avoid divergence.
2189 # and its descendants to avoid divergence.
2195 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2190 if srcrev in extinctrevs or any(s in destmap for s in succrevs):
2196 obsoletewithoutsuccessorindestination.add(srcrev)
2191 obsoletewithoutsuccessorindestination.add(srcrev)
2197
2192
2198 return (
2193 return (
2199 obsoletenotrebased,
2194 obsoletenotrebased,
2200 obsoletewithoutsuccessorindestination,
2195 obsoletewithoutsuccessorindestination,
2201 obsoleteextinctsuccessors,
2196 obsoleteextinctsuccessors,
2202 )
2197 )
2203
2198
2204
2199
2205 def abortrebase(ui, repo):
2200 def abortrebase(ui, repo):
2206 with repo.wlock(), repo.lock():
2201 with repo.wlock(), repo.lock():
2207 rbsrt = rebaseruntime(repo, ui)
2202 rbsrt = rebaseruntime(repo, ui)
2208 rbsrt._prepareabortorcontinue(isabort=True)
2203 rbsrt._prepareabortorcontinue(isabort=True)
2209
2204
2210
2205
2211 def continuerebase(ui, repo):
2206 def continuerebase(ui, repo):
2212 with repo.wlock(), repo.lock():
2207 with repo.wlock(), repo.lock():
2213 rbsrt = rebaseruntime(repo, ui)
2208 rbsrt = rebaseruntime(repo, ui)
2214 ms = mergestatemod.mergestate.read(repo)
2209 ms = mergestatemod.mergestate.read(repo)
2215 mergeutil.checkunresolved(ms)
2210 mergeutil.checkunresolved(ms)
2216 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2211 retcode = rbsrt._prepareabortorcontinue(isabort=False)
2217 if retcode is not None:
2212 if retcode is not None:
2218 return retcode
2213 return retcode
2219 rbsrt._performrebase(None)
2214 rbsrt._performrebase(None)
2220 rbsrt._finishrebase()
2215 rbsrt._finishrebase()
2221
2216
2222
2217
2223 def summaryhook(ui, repo):
2218 def summaryhook(ui, repo):
2224 if not repo.vfs.exists(b'rebasestate'):
2219 if not repo.vfs.exists(b'rebasestate'):
2225 return
2220 return
2226 try:
2221 try:
2227 rbsrt = rebaseruntime(repo, ui, {})
2222 rbsrt = rebaseruntime(repo, ui, {})
2228 rbsrt.restorestatus()
2223 rbsrt.restorestatus()
2229 state = rbsrt.state
2224 state = rbsrt.state
2230 except error.RepoLookupError:
2225 except error.RepoLookupError:
2231 # i18n: column positioning for "hg summary"
2226 # i18n: column positioning for "hg summary"
2232 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2227 msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
2233 ui.write(msg)
2228 ui.write(msg)
2234 return
2229 return
2235 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2230 numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
2236 # i18n: column positioning for "hg summary"
2231 # i18n: column positioning for "hg summary"
2237 ui.write(
2232 ui.write(
2238 _(b'rebase: %s, %s (rebase --continue)\n')
2233 _(b'rebase: %s, %s (rebase --continue)\n')
2239 % (
2234 % (
2240 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2235 ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
2241 ui.label(_(b'%d remaining'), b'rebase.remaining')
2236 ui.label(_(b'%d remaining'), b'rebase.remaining')
2242 % (len(state) - numrebased),
2237 % (len(state) - numrebased),
2243 )
2238 )
2244 )
2239 )
2245
2240
2246
2241
2247 def uisetup(ui):
2242 def uisetup(ui):
2248 # Replace pull with a decorator to provide --rebase option
2243 # Replace pull with a decorator to provide --rebase option
2249 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2244 entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
2250 entry[1].append(
2245 entry[1].append(
2251 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2246 (b'', b'rebase', None, _(b"rebase working directory to branch head"))
2252 )
2247 )
2253 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2248 entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
2254 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2249 cmdutil.summaryhooks.add(b'rebase', summaryhook)
2255 statemod.addunfinished(
2250 statemod.addunfinished(
2256 b'rebase',
2251 b'rebase',
2257 fname=b'rebasestate',
2252 fname=b'rebasestate',
2258 stopflag=True,
2253 stopflag=True,
2259 continueflag=True,
2254 continueflag=True,
2260 abortfunc=abortrebase,
2255 abortfunc=abortrebase,
2261 continuefunc=continuerebase,
2256 continuefunc=continuerebase,
2262 )
2257 )
@@ -1,438 +1,454
1 # error.py - Mercurial exceptions
1 # error.py - Mercurial exceptions
2 #
2 #
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Mercurial exceptions.
8 """Mercurial exceptions.
9
9
10 This allows us to catch exceptions at higher levels without forcing
10 This allows us to catch exceptions at higher levels without forcing
11 imports.
11 imports.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 # Do not import anything but pycompat here, please
16 # Do not import anything but pycompat here, please
17 from . import pycompat
17 from . import pycompat
18
18
19
19
20 def _tobytes(exc):
20 def _tobytes(exc):
21 """Byte-stringify exception in the same way as BaseException.__str__()"""
21 """Byte-stringify exception in the same way as BaseException.__str__()"""
22 if not exc.args:
22 if not exc.args:
23 return b''
23 return b''
24 if len(exc.args) == 1:
24 if len(exc.args) == 1:
25 return pycompat.bytestr(exc.args[0])
25 return pycompat.bytestr(exc.args[0])
26 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
26 return b'(%s)' % b', '.join(b"'%s'" % pycompat.bytestr(a) for a in exc.args)
27
27
28
28
29 class Hint(object):
29 class Hint(object):
30 """Mix-in to provide a hint of an error
30 """Mix-in to provide a hint of an error
31
31
32 This should come first in the inheritance list to consume a hint and
32 This should come first in the inheritance list to consume a hint and
33 pass remaining arguments to the exception class.
33 pass remaining arguments to the exception class.
34 """
34 """
35
35
36 def __init__(self, *args, **kw):
36 def __init__(self, *args, **kw):
37 self.hint = kw.pop('hint', None)
37 self.hint = kw.pop('hint', None)
38 super(Hint, self).__init__(*args, **kw)
38 super(Hint, self).__init__(*args, **kw)
39
39
40
40
41 class StorageError(Hint, Exception):
41 class StorageError(Hint, Exception):
42 """Raised when an error occurs in a storage layer.
42 """Raised when an error occurs in a storage layer.
43
43
44 Usually subclassed by a storage-specific exception.
44 Usually subclassed by a storage-specific exception.
45 """
45 """
46
46
47 __bytes__ = _tobytes
47 __bytes__ = _tobytes
48
48
49
49
50 class RevlogError(StorageError):
50 class RevlogError(StorageError):
51 __bytes__ = _tobytes
51 __bytes__ = _tobytes
52
52
53
53
54 class SidedataHashError(RevlogError):
54 class SidedataHashError(RevlogError):
55 def __init__(self, key, expected, got):
55 def __init__(self, key, expected, got):
56 self.sidedatakey = key
56 self.sidedatakey = key
57 self.expecteddigest = expected
57 self.expecteddigest = expected
58 self.actualdigest = got
58 self.actualdigest = got
59
59
60
60
61 class FilteredIndexError(IndexError):
61 class FilteredIndexError(IndexError):
62 __bytes__ = _tobytes
62 __bytes__ = _tobytes
63
63
64
64
65 class LookupError(RevlogError, KeyError):
65 class LookupError(RevlogError, KeyError):
66 def __init__(self, name, index, message):
66 def __init__(self, name, index, message):
67 self.name = name
67 self.name = name
68 self.index = index
68 self.index = index
69 # this can't be called 'message' because at least some installs of
69 # this can't be called 'message' because at least some installs of
70 # Python 2.6+ complain about the 'message' property being deprecated
70 # Python 2.6+ complain about the 'message' property being deprecated
71 self.lookupmessage = message
71 self.lookupmessage = message
72 if isinstance(name, bytes) and len(name) == 20:
72 if isinstance(name, bytes) and len(name) == 20:
73 from .node import short
73 from .node import short
74
74
75 name = short(name)
75 name = short(name)
76 RevlogError.__init__(self, b'%s@%s: %s' % (index, name, message))
76 RevlogError.__init__(self, b'%s@%s: %s' % (index, name, message))
77
77
78 def __bytes__(self):
78 def __bytes__(self):
79 return RevlogError.__bytes__(self)
79 return RevlogError.__bytes__(self)
80
80
81 def __str__(self):
81 def __str__(self):
82 return RevlogError.__str__(self)
82 return RevlogError.__str__(self)
83
83
84
84
85 class AmbiguousPrefixLookupError(LookupError):
85 class AmbiguousPrefixLookupError(LookupError):
86 pass
86 pass
87
87
88
88
89 class FilteredLookupError(LookupError):
89 class FilteredLookupError(LookupError):
90 pass
90 pass
91
91
92
92
93 class ManifestLookupError(LookupError):
93 class ManifestLookupError(LookupError):
94 pass
94 pass
95
95
96
96
97 class CommandError(Exception):
97 class CommandError(Exception):
98 """Exception raised on errors in parsing the command line."""
98 """Exception raised on errors in parsing the command line."""
99
99
100 __bytes__ = _tobytes
100 __bytes__ = _tobytes
101
101
102
102
103 class InterventionRequired(Hint, Exception):
103 class InterventionRequired(Hint, Exception):
104 """Exception raised when a command requires human intervention."""
104 """Exception raised when a command requires human intervention."""
105
105
106 __bytes__ = _tobytes
106 __bytes__ = _tobytes
107
107
108
108
109 class ConflictResolutionRequired(InterventionRequired):
110 """Exception raised when a continuable command requires merge conflict resolution."""
111
112 def __init__(self, opname):
113 from .i18n import _
114
115 self.opname = opname
116 InterventionRequired.__init__(
117 self,
118 _(
119 b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
120 )
121 % opname,
122 )
123
124
109 class Abort(Hint, Exception):
125 class Abort(Hint, Exception):
110 """Raised if a command needs to print an error and exit."""
126 """Raised if a command needs to print an error and exit."""
111
127
112 __bytes__ = _tobytes
128 __bytes__ = _tobytes
113
129
114 if pycompat.ispy3:
130 if pycompat.ispy3:
115
131
116 def __str__(self):
132 def __str__(self):
117 # the output would be unreadable if the message was translated,
133 # the output would be unreadable if the message was translated,
118 # but do not replace it with encoding.strfromlocal(), which
134 # but do not replace it with encoding.strfromlocal(), which
119 # may raise another exception.
135 # may raise another exception.
120 return pycompat.sysstr(self.__bytes__())
136 return pycompat.sysstr(self.__bytes__())
121
137
122
138
123 class HookLoadError(Abort):
139 class HookLoadError(Abort):
124 """raised when loading a hook fails, aborting an operation
140 """raised when loading a hook fails, aborting an operation
125
141
126 Exists to allow more specialized catching."""
142 Exists to allow more specialized catching."""
127
143
128
144
129 class HookAbort(Abort):
145 class HookAbort(Abort):
130 """raised when a validation hook fails, aborting an operation
146 """raised when a validation hook fails, aborting an operation
131
147
132 Exists to allow more specialized catching."""
148 Exists to allow more specialized catching."""
133
149
134
150
135 class ConfigError(Abort):
151 class ConfigError(Abort):
136 """Exception raised when parsing config files"""
152 """Exception raised when parsing config files"""
137
153
138
154
139 class UpdateAbort(Abort):
155 class UpdateAbort(Abort):
140 """Raised when an update is aborted for destination issue"""
156 """Raised when an update is aborted for destination issue"""
141
157
142
158
143 class MergeDestAbort(Abort):
159 class MergeDestAbort(Abort):
144 """Raised when an update is aborted for destination issues"""
160 """Raised when an update is aborted for destination issues"""
145
161
146
162
147 class NoMergeDestAbort(MergeDestAbort):
163 class NoMergeDestAbort(MergeDestAbort):
148 """Raised when an update is aborted because there is nothing to merge"""
164 """Raised when an update is aborted because there is nothing to merge"""
149
165
150
166
151 class ManyMergeDestAbort(MergeDestAbort):
167 class ManyMergeDestAbort(MergeDestAbort):
152 """Raised when an update is aborted because destination is ambiguous"""
168 """Raised when an update is aborted because destination is ambiguous"""
153
169
154
170
155 class ResponseExpected(Abort):
171 class ResponseExpected(Abort):
156 """Raised when an EOF is received for a prompt"""
172 """Raised when an EOF is received for a prompt"""
157
173
158 def __init__(self):
174 def __init__(self):
159 from .i18n import _
175 from .i18n import _
160
176
161 Abort.__init__(self, _(b'response expected'))
177 Abort.__init__(self, _(b'response expected'))
162
178
163
179
164 class OutOfBandError(Hint, Exception):
180 class OutOfBandError(Hint, Exception):
165 """Exception raised when a remote repo reports failure"""
181 """Exception raised when a remote repo reports failure"""
166
182
167 __bytes__ = _tobytes
183 __bytes__ = _tobytes
168
184
169
185
170 class ParseError(Hint, Exception):
186 class ParseError(Hint, Exception):
171 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
187 """Raised when parsing config files and {rev,file}sets (msg[, pos])"""
172
188
173 __bytes__ = _tobytes
189 __bytes__ = _tobytes
174
190
175
191
176 class PatchError(Exception):
192 class PatchError(Exception):
177 __bytes__ = _tobytes
193 __bytes__ = _tobytes
178
194
179
195
180 class UnknownIdentifier(ParseError):
196 class UnknownIdentifier(ParseError):
181 """Exception raised when a {rev,file}set references an unknown identifier"""
197 """Exception raised when a {rev,file}set references an unknown identifier"""
182
198
183 def __init__(self, function, symbols):
199 def __init__(self, function, symbols):
184 from .i18n import _
200 from .i18n import _
185
201
186 ParseError.__init__(self, _(b"unknown identifier: %s") % function)
202 ParseError.__init__(self, _(b"unknown identifier: %s") % function)
187 self.function = function
203 self.function = function
188 self.symbols = symbols
204 self.symbols = symbols
189
205
190
206
191 class RepoError(Hint, Exception):
207 class RepoError(Hint, Exception):
192 __bytes__ = _tobytes
208 __bytes__ = _tobytes
193
209
194
210
195 class RepoLookupError(RepoError):
211 class RepoLookupError(RepoError):
196 pass
212 pass
197
213
198
214
199 class FilteredRepoLookupError(RepoLookupError):
215 class FilteredRepoLookupError(RepoLookupError):
200 pass
216 pass
201
217
202
218
203 class CapabilityError(RepoError):
219 class CapabilityError(RepoError):
204 pass
220 pass
205
221
206
222
207 class RequirementError(RepoError):
223 class RequirementError(RepoError):
208 """Exception raised if .hg/requires has an unknown entry."""
224 """Exception raised if .hg/requires has an unknown entry."""
209
225
210
226
211 class StdioError(IOError):
227 class StdioError(IOError):
212 """Raised if I/O to stdout or stderr fails"""
228 """Raised if I/O to stdout or stderr fails"""
213
229
214 def __init__(self, err):
230 def __init__(self, err):
215 IOError.__init__(self, err.errno, err.strerror)
231 IOError.__init__(self, err.errno, err.strerror)
216
232
217 # no __bytes__() because error message is derived from the standard IOError
233 # no __bytes__() because error message is derived from the standard IOError
218
234
219
235
220 class UnsupportedMergeRecords(Abort):
236 class UnsupportedMergeRecords(Abort):
221 def __init__(self, recordtypes):
237 def __init__(self, recordtypes):
222 from .i18n import _
238 from .i18n import _
223
239
224 self.recordtypes = sorted(recordtypes)
240 self.recordtypes = sorted(recordtypes)
225 s = b' '.join(self.recordtypes)
241 s = b' '.join(self.recordtypes)
226 Abort.__init__(
242 Abort.__init__(
227 self,
243 self,
228 _(b'unsupported merge state records: %s') % s,
244 _(b'unsupported merge state records: %s') % s,
229 hint=_(
245 hint=_(
230 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
246 b'see https://mercurial-scm.org/wiki/MergeStateRecords for '
231 b'more information'
247 b'more information'
232 ),
248 ),
233 )
249 )
234
250
235
251
236 class UnknownVersion(Abort):
252 class UnknownVersion(Abort):
237 """generic exception for aborting from an encounter with an unknown version
253 """generic exception for aborting from an encounter with an unknown version
238 """
254 """
239
255
240 def __init__(self, msg, hint=None, version=None):
256 def __init__(self, msg, hint=None, version=None):
241 self.version = version
257 self.version = version
242 super(UnknownVersion, self).__init__(msg, hint=hint)
258 super(UnknownVersion, self).__init__(msg, hint=hint)
243
259
244
260
245 class LockError(IOError):
261 class LockError(IOError):
246 def __init__(self, errno, strerror, filename, desc):
262 def __init__(self, errno, strerror, filename, desc):
247 IOError.__init__(self, errno, strerror, filename)
263 IOError.__init__(self, errno, strerror, filename)
248 self.desc = desc
264 self.desc = desc
249
265
250 # no __bytes__() because error message is derived from the standard IOError
266 # no __bytes__() because error message is derived from the standard IOError
251
267
252
268
253 class LockHeld(LockError):
269 class LockHeld(LockError):
254 def __init__(self, errno, filename, desc, locker):
270 def __init__(self, errno, filename, desc, locker):
255 LockError.__init__(self, errno, b'Lock held', filename, desc)
271 LockError.__init__(self, errno, b'Lock held', filename, desc)
256 self.locker = locker
272 self.locker = locker
257
273
258
274
259 class LockUnavailable(LockError):
275 class LockUnavailable(LockError):
260 pass
276 pass
261
277
262
278
263 # LockError is for errors while acquiring the lock -- this is unrelated
279 # LockError is for errors while acquiring the lock -- this is unrelated
264 class LockInheritanceContractViolation(RuntimeError):
280 class LockInheritanceContractViolation(RuntimeError):
265 __bytes__ = _tobytes
281 __bytes__ = _tobytes
266
282
267
283
268 class ResponseError(Exception):
284 class ResponseError(Exception):
269 """Raised to print an error with part of output and exit."""
285 """Raised to print an error with part of output and exit."""
270
286
271 __bytes__ = _tobytes
287 __bytes__ = _tobytes
272
288
273
289
274 class UnknownCommand(Exception):
290 class UnknownCommand(Exception):
275 """Exception raised if command is not in the command table."""
291 """Exception raised if command is not in the command table."""
276
292
277 __bytes__ = _tobytes
293 __bytes__ = _tobytes
278
294
279
295
280 class AmbiguousCommand(Exception):
296 class AmbiguousCommand(Exception):
281 """Exception raised if command shortcut matches more than one command."""
297 """Exception raised if command shortcut matches more than one command."""
282
298
283 __bytes__ = _tobytes
299 __bytes__ = _tobytes
284
300
285
301
286 # derived from KeyboardInterrupt to simplify some breakout code
302 # derived from KeyboardInterrupt to simplify some breakout code
287 class SignalInterrupt(KeyboardInterrupt):
303 class SignalInterrupt(KeyboardInterrupt):
288 """Exception raised on SIGTERM and SIGHUP."""
304 """Exception raised on SIGTERM and SIGHUP."""
289
305
290
306
291 class SignatureError(Exception):
307 class SignatureError(Exception):
292 __bytes__ = _tobytes
308 __bytes__ = _tobytes
293
309
294
310
295 class PushRaced(RuntimeError):
311 class PushRaced(RuntimeError):
296 """An exception raised during unbundling that indicates a push race"""
312 """An exception raised during unbundling that indicates a push race"""
297
313
298 __bytes__ = _tobytes
314 __bytes__ = _tobytes
299
315
300
316
301 class ProgrammingError(Hint, RuntimeError):
317 class ProgrammingError(Hint, RuntimeError):
302 """Raised if a mercurial (core or extension) developer made a mistake"""
318 """Raised if a mercurial (core or extension) developer made a mistake"""
303
319
304 def __init__(self, msg, *args, **kwargs):
320 def __init__(self, msg, *args, **kwargs):
305 # On Python 3, turn the message back into a string since this is
321 # On Python 3, turn the message back into a string since this is
306 # an internal-only error that won't be printed except in a
322 # an internal-only error that won't be printed except in a
307 # stack trace.
323 # stack trace.
308 msg = pycompat.sysstr(msg)
324 msg = pycompat.sysstr(msg)
309 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
325 super(ProgrammingError, self).__init__(msg, *args, **kwargs)
310
326
311 __bytes__ = _tobytes
327 __bytes__ = _tobytes
312
328
313
329
314 class WdirUnsupported(Exception):
330 class WdirUnsupported(Exception):
315 """An exception which is raised when 'wdir()' is not supported"""
331 """An exception which is raised when 'wdir()' is not supported"""
316
332
317 __bytes__ = _tobytes
333 __bytes__ = _tobytes
318
334
319
335
320 # bundle2 related errors
336 # bundle2 related errors
321 class BundleValueError(ValueError):
337 class BundleValueError(ValueError):
322 """error raised when bundle2 cannot be processed"""
338 """error raised when bundle2 cannot be processed"""
323
339
324 __bytes__ = _tobytes
340 __bytes__ = _tobytes
325
341
326
342
327 class BundleUnknownFeatureError(BundleValueError):
343 class BundleUnknownFeatureError(BundleValueError):
328 def __init__(self, parttype=None, params=(), values=()):
344 def __init__(self, parttype=None, params=(), values=()):
329 self.parttype = parttype
345 self.parttype = parttype
330 self.params = params
346 self.params = params
331 self.values = values
347 self.values = values
332 if self.parttype is None:
348 if self.parttype is None:
333 msg = b'Stream Parameter'
349 msg = b'Stream Parameter'
334 else:
350 else:
335 msg = parttype
351 msg = parttype
336 entries = self.params
352 entries = self.params
337 if self.params and self.values:
353 if self.params and self.values:
338 assert len(self.params) == len(self.values)
354 assert len(self.params) == len(self.values)
339 entries = []
355 entries = []
340 for idx, par in enumerate(self.params):
356 for idx, par in enumerate(self.params):
341 val = self.values[idx]
357 val = self.values[idx]
342 if val is None:
358 if val is None:
343 entries.append(val)
359 entries.append(val)
344 else:
360 else:
345 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
361 entries.append(b"%s=%r" % (par, pycompat.maybebytestr(val)))
346 if entries:
362 if entries:
347 msg = b'%s - %s' % (msg, b', '.join(entries))
363 msg = b'%s - %s' % (msg, b', '.join(entries))
348 ValueError.__init__(self, msg)
364 ValueError.__init__(self, msg)
349
365
350
366
351 class ReadOnlyPartError(RuntimeError):
367 class ReadOnlyPartError(RuntimeError):
352 """error raised when code tries to alter a part being generated"""
368 """error raised when code tries to alter a part being generated"""
353
369
354 __bytes__ = _tobytes
370 __bytes__ = _tobytes
355
371
356
372
357 class PushkeyFailed(Abort):
373 class PushkeyFailed(Abort):
358 """error raised when a pushkey part failed to update a value"""
374 """error raised when a pushkey part failed to update a value"""
359
375
360 def __init__(
376 def __init__(
361 self, partid, namespace=None, key=None, new=None, old=None, ret=None
377 self, partid, namespace=None, key=None, new=None, old=None, ret=None
362 ):
378 ):
363 self.partid = partid
379 self.partid = partid
364 self.namespace = namespace
380 self.namespace = namespace
365 self.key = key
381 self.key = key
366 self.new = new
382 self.new = new
367 self.old = old
383 self.old = old
368 self.ret = ret
384 self.ret = ret
369 # no i18n expected to be processed into a better message
385 # no i18n expected to be processed into a better message
370 Abort.__init__(
386 Abort.__init__(
371 self, b'failed to update value for "%s/%s"' % (namespace, key)
387 self, b'failed to update value for "%s/%s"' % (namespace, key)
372 )
388 )
373
389
374
390
375 class CensoredNodeError(StorageError):
391 class CensoredNodeError(StorageError):
376 """error raised when content verification fails on a censored node
392 """error raised when content verification fails on a censored node
377
393
378 Also contains the tombstone data substituted for the uncensored data.
394 Also contains the tombstone data substituted for the uncensored data.
379 """
395 """
380
396
381 def __init__(self, filename, node, tombstone):
397 def __init__(self, filename, node, tombstone):
382 from .node import short
398 from .node import short
383
399
384 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
400 StorageError.__init__(self, b'%s:%s' % (filename, short(node)))
385 self.tombstone = tombstone
401 self.tombstone = tombstone
386
402
387
403
388 class CensoredBaseError(StorageError):
404 class CensoredBaseError(StorageError):
389 """error raised when a delta is rejected because its base is censored
405 """error raised when a delta is rejected because its base is censored
390
406
391 A delta based on a censored revision must be formed as single patch
407 A delta based on a censored revision must be formed as single patch
392 operation which replaces the entire base with new content. This ensures
408 operation which replaces the entire base with new content. This ensures
393 the delta may be applied by clones which have not censored the base.
409 the delta may be applied by clones which have not censored the base.
394 """
410 """
395
411
396
412
397 class InvalidBundleSpecification(Exception):
413 class InvalidBundleSpecification(Exception):
398 """error raised when a bundle specification is invalid.
414 """error raised when a bundle specification is invalid.
399
415
400 This is used for syntax errors as opposed to support errors.
416 This is used for syntax errors as opposed to support errors.
401 """
417 """
402
418
403 __bytes__ = _tobytes
419 __bytes__ = _tobytes
404
420
405
421
406 class UnsupportedBundleSpecification(Exception):
422 class UnsupportedBundleSpecification(Exception):
407 """error raised when a bundle specification is not supported."""
423 """error raised when a bundle specification is not supported."""
408
424
409 __bytes__ = _tobytes
425 __bytes__ = _tobytes
410
426
411
427
412 class CorruptedState(Exception):
428 class CorruptedState(Exception):
413 """error raised when a command is not able to read its state from file"""
429 """error raised when a command is not able to read its state from file"""
414
430
415 __bytes__ = _tobytes
431 __bytes__ = _tobytes
416
432
417
433
418 class PeerTransportError(Abort):
434 class PeerTransportError(Abort):
419 """Transport-level I/O error when communicating with a peer repo."""
435 """Transport-level I/O error when communicating with a peer repo."""
420
436
421
437
422 class InMemoryMergeConflictsError(Exception):
438 class InMemoryMergeConflictsError(Exception):
423 """Exception raised when merge conflicts arose during an in-memory merge."""
439 """Exception raised when merge conflicts arose during an in-memory merge."""
424
440
425 __bytes__ = _tobytes
441 __bytes__ = _tobytes
426
442
427
443
428 class WireprotoCommandError(Exception):
444 class WireprotoCommandError(Exception):
429 """Represents an error during execution of a wire protocol command.
445 """Represents an error during execution of a wire protocol command.
430
446
431 Should only be thrown by wire protocol version 2 commands.
447 Should only be thrown by wire protocol version 2 commands.
432
448
433 The error is a formatter string and an optional iterable of arguments.
449 The error is a formatter string and an optional iterable of arguments.
434 """
450 """
435
451
436 def __init__(self, message, args=None):
452 def __init__(self, message, args=None):
437 self.message = message
453 self.message = message
438 self.messageargs = args
454 self.messageargs = args
@@ -1,1179 +1,1174
1 # shelve.py - save/restore working directory state
1 # shelve.py - save/restore working directory state
2 #
2 #
3 # Copyright 2013 Facebook, Inc.
3 # Copyright 2013 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """save and restore changes to the working directory
8 """save and restore changes to the working directory
9
9
10 The "hg shelve" command saves changes made to the working directory
10 The "hg shelve" command saves changes made to the working directory
11 and reverts those changes, resetting the working directory to a clean
11 and reverts those changes, resetting the working directory to a clean
12 state.
12 state.
13
13
14 Later on, the "hg unshelve" command restores the changes saved by "hg
14 Later on, the "hg unshelve" command restores the changes saved by "hg
15 shelve". Changes can be restored even after updating to a different
15 shelve". Changes can be restored even after updating to a different
16 parent, in which case Mercurial's merge machinery will resolve any
16 parent, in which case Mercurial's merge machinery will resolve any
17 conflicts if necessary.
17 conflicts if necessary.
18
18
19 You can have more than one shelved change outstanding at a time; each
19 You can have more than one shelved change outstanding at a time; each
20 shelved change has a distinct name. For details, see the help for "hg
20 shelved change has a distinct name. For details, see the help for "hg
21 shelve".
21 shelve".
22 """
22 """
23 from __future__ import absolute_import
23 from __future__ import absolute_import
24
24
25 import collections
25 import collections
26 import errno
26 import errno
27 import itertools
27 import itertools
28 import stat
28 import stat
29
29
30 from .i18n import _
30 from .i18n import _
31 from .pycompat import open
31 from .pycompat import open
32 from . import (
32 from . import (
33 bookmarks,
33 bookmarks,
34 bundle2,
34 bundle2,
35 bundlerepo,
35 bundlerepo,
36 changegroup,
36 changegroup,
37 cmdutil,
37 cmdutil,
38 discovery,
38 discovery,
39 error,
39 error,
40 exchange,
40 exchange,
41 hg,
41 hg,
42 lock as lockmod,
42 lock as lockmod,
43 mdiff,
43 mdiff,
44 merge,
44 merge,
45 mergestate as mergestatemod,
45 mergestate as mergestatemod,
46 node as nodemod,
46 node as nodemod,
47 patch,
47 patch,
48 phases,
48 phases,
49 pycompat,
49 pycompat,
50 repair,
50 repair,
51 scmutil,
51 scmutil,
52 templatefilters,
52 templatefilters,
53 util,
53 util,
54 vfs as vfsmod,
54 vfs as vfsmod,
55 )
55 )
56 from .utils import (
56 from .utils import (
57 dateutil,
57 dateutil,
58 stringutil,
58 stringutil,
59 )
59 )
60
60
61 backupdir = b'shelve-backup'
61 backupdir = b'shelve-backup'
62 shelvedir = b'shelved'
62 shelvedir = b'shelved'
63 shelvefileextensions = [b'hg', b'patch', b'shelve']
63 shelvefileextensions = [b'hg', b'patch', b'shelve']
64 # universal extension is present in all types of shelves
64 # universal extension is present in all types of shelves
65 patchextension = b'patch'
65 patchextension = b'patch'
66
66
67 # we never need the user, so we use a
67 # we never need the user, so we use a
68 # generic user for all shelve operations
68 # generic user for all shelve operations
69 shelveuser = b'shelve@localhost'
69 shelveuser = b'shelve@localhost'
70
70
71
71
72 class shelvedfile(object):
72 class shelvedfile(object):
73 """Helper for the file storing a single shelve
73 """Helper for the file storing a single shelve
74
74
75 Handles common functions on shelve files (.hg/.patch) using
75 Handles common functions on shelve files (.hg/.patch) using
76 the vfs layer"""
76 the vfs layer"""
77
77
78 def __init__(self, repo, name, filetype=None):
78 def __init__(self, repo, name, filetype=None):
79 self.repo = repo
79 self.repo = repo
80 self.name = name
80 self.name = name
81 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
81 self.vfs = vfsmod.vfs(repo.vfs.join(shelvedir))
82 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
82 self.backupvfs = vfsmod.vfs(repo.vfs.join(backupdir))
83 self.ui = self.repo.ui
83 self.ui = self.repo.ui
84 if filetype:
84 if filetype:
85 self.fname = name + b'.' + filetype
85 self.fname = name + b'.' + filetype
86 else:
86 else:
87 self.fname = name
87 self.fname = name
88
88
89 def exists(self):
89 def exists(self):
90 return self.vfs.exists(self.fname)
90 return self.vfs.exists(self.fname)
91
91
92 def filename(self):
92 def filename(self):
93 return self.vfs.join(self.fname)
93 return self.vfs.join(self.fname)
94
94
95 def backupfilename(self):
95 def backupfilename(self):
96 def gennames(base):
96 def gennames(base):
97 yield base
97 yield base
98 base, ext = base.rsplit(b'.', 1)
98 base, ext = base.rsplit(b'.', 1)
99 for i in itertools.count(1):
99 for i in itertools.count(1):
100 yield b'%s-%d.%s' % (base, i, ext)
100 yield b'%s-%d.%s' % (base, i, ext)
101
101
102 name = self.backupvfs.join(self.fname)
102 name = self.backupvfs.join(self.fname)
103 for n in gennames(name):
103 for n in gennames(name):
104 if not self.backupvfs.exists(n):
104 if not self.backupvfs.exists(n):
105 return n
105 return n
106
106
107 def movetobackup(self):
107 def movetobackup(self):
108 if not self.backupvfs.isdir():
108 if not self.backupvfs.isdir():
109 self.backupvfs.makedir()
109 self.backupvfs.makedir()
110 util.rename(self.filename(), self.backupfilename())
110 util.rename(self.filename(), self.backupfilename())
111
111
112 def stat(self):
112 def stat(self):
113 return self.vfs.stat(self.fname)
113 return self.vfs.stat(self.fname)
114
114
115 def opener(self, mode=b'rb'):
115 def opener(self, mode=b'rb'):
116 try:
116 try:
117 return self.vfs(self.fname, mode)
117 return self.vfs(self.fname, mode)
118 except IOError as err:
118 except IOError as err:
119 if err.errno != errno.ENOENT:
119 if err.errno != errno.ENOENT:
120 raise
120 raise
121 raise error.Abort(_(b"shelved change '%s' not found") % self.name)
121 raise error.Abort(_(b"shelved change '%s' not found") % self.name)
122
122
123 def applybundle(self, tr):
123 def applybundle(self, tr):
124 fp = self.opener()
124 fp = self.opener()
125 try:
125 try:
126 targetphase = phases.internal
126 targetphase = phases.internal
127 if not phases.supportinternal(self.repo):
127 if not phases.supportinternal(self.repo):
128 targetphase = phases.secret
128 targetphase = phases.secret
129 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
129 gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
130 pretip = self.repo[b'tip']
130 pretip = self.repo[b'tip']
131 bundle2.applybundle(
131 bundle2.applybundle(
132 self.repo,
132 self.repo,
133 gen,
133 gen,
134 tr,
134 tr,
135 source=b'unshelve',
135 source=b'unshelve',
136 url=b'bundle:' + self.vfs.join(self.fname),
136 url=b'bundle:' + self.vfs.join(self.fname),
137 targetphase=targetphase,
137 targetphase=targetphase,
138 )
138 )
139 shelvectx = self.repo[b'tip']
139 shelvectx = self.repo[b'tip']
140 if pretip == shelvectx:
140 if pretip == shelvectx:
141 shelverev = tr.changes[b'revduplicates'][-1]
141 shelverev = tr.changes[b'revduplicates'][-1]
142 shelvectx = self.repo[shelverev]
142 shelvectx = self.repo[shelverev]
143 return shelvectx
143 return shelvectx
144 finally:
144 finally:
145 fp.close()
145 fp.close()
146
146
147 def bundlerepo(self):
147 def bundlerepo(self):
148 path = self.vfs.join(self.fname)
148 path = self.vfs.join(self.fname)
149 return bundlerepo.instance(
149 return bundlerepo.instance(
150 self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path), False
150 self.repo.baseui, b'bundle://%s+%s' % (self.repo.root, path), False
151 )
151 )
152
152
153 def writebundle(self, bases, node):
153 def writebundle(self, bases, node):
154 cgversion = changegroup.safeversion(self.repo)
154 cgversion = changegroup.safeversion(self.repo)
155 if cgversion == b'01':
155 if cgversion == b'01':
156 btype = b'HG10BZ'
156 btype = b'HG10BZ'
157 compression = None
157 compression = None
158 else:
158 else:
159 btype = b'HG20'
159 btype = b'HG20'
160 compression = b'BZ'
160 compression = b'BZ'
161
161
162 repo = self.repo.unfiltered()
162 repo = self.repo.unfiltered()
163
163
164 outgoing = discovery.outgoing(
164 outgoing = discovery.outgoing(
165 repo, missingroots=bases, ancestorsof=[node]
165 repo, missingroots=bases, ancestorsof=[node]
166 )
166 )
167 cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
167 cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
168
168
169 bundle2.writebundle(
169 bundle2.writebundle(
170 self.ui, cg, self.fname, btype, self.vfs, compression=compression
170 self.ui, cg, self.fname, btype, self.vfs, compression=compression
171 )
171 )
172
172
173 def writeinfo(self, info):
173 def writeinfo(self, info):
174 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
174 scmutil.simplekeyvaluefile(self.vfs, self.fname).write(info)
175
175
176 def readinfo(self):
176 def readinfo(self):
177 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
177 return scmutil.simplekeyvaluefile(self.vfs, self.fname).read()
178
178
179
179
180 class shelvedstate(object):
180 class shelvedstate(object):
181 """Handle persistence during unshelving operations.
181 """Handle persistence during unshelving operations.
182
182
183 Handles saving and restoring a shelved state. Ensures that different
183 Handles saving and restoring a shelved state. Ensures that different
184 versions of a shelved state are possible and handles them appropriately.
184 versions of a shelved state are possible and handles them appropriately.
185 """
185 """
186
186
187 _version = 2
187 _version = 2
188 _filename = b'shelvedstate'
188 _filename = b'shelvedstate'
189 _keep = b'keep'
189 _keep = b'keep'
190 _nokeep = b'nokeep'
190 _nokeep = b'nokeep'
191 # colon is essential to differentiate from a real bookmark name
191 # colon is essential to differentiate from a real bookmark name
192 _noactivebook = b':no-active-bookmark'
192 _noactivebook = b':no-active-bookmark'
193 _interactive = b'interactive'
193 _interactive = b'interactive'
194
194
195 @classmethod
195 @classmethod
196 def _verifyandtransform(cls, d):
196 def _verifyandtransform(cls, d):
197 """Some basic shelvestate syntactic verification and transformation"""
197 """Some basic shelvestate syntactic verification and transformation"""
198 try:
198 try:
199 d[b'originalwctx'] = nodemod.bin(d[b'originalwctx'])
199 d[b'originalwctx'] = nodemod.bin(d[b'originalwctx'])
200 d[b'pendingctx'] = nodemod.bin(d[b'pendingctx'])
200 d[b'pendingctx'] = nodemod.bin(d[b'pendingctx'])
201 d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')]
201 d[b'parents'] = [nodemod.bin(h) for h in d[b'parents'].split(b' ')]
202 d[b'nodestoremove'] = [
202 d[b'nodestoremove'] = [
203 nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ')
203 nodemod.bin(h) for h in d[b'nodestoremove'].split(b' ')
204 ]
204 ]
205 except (ValueError, TypeError, KeyError) as err:
205 except (ValueError, TypeError, KeyError) as err:
206 raise error.CorruptedState(pycompat.bytestr(err))
206 raise error.CorruptedState(pycompat.bytestr(err))
207
207
208 @classmethod
208 @classmethod
209 def _getversion(cls, repo):
209 def _getversion(cls, repo):
210 """Read version information from shelvestate file"""
210 """Read version information from shelvestate file"""
211 fp = repo.vfs(cls._filename)
211 fp = repo.vfs(cls._filename)
212 try:
212 try:
213 version = int(fp.readline().strip())
213 version = int(fp.readline().strip())
214 except ValueError as err:
214 except ValueError as err:
215 raise error.CorruptedState(pycompat.bytestr(err))
215 raise error.CorruptedState(pycompat.bytestr(err))
216 finally:
216 finally:
217 fp.close()
217 fp.close()
218 return version
218 return version
219
219
220 @classmethod
220 @classmethod
221 def _readold(cls, repo):
221 def _readold(cls, repo):
222 """Read the old position-based version of a shelvestate file"""
222 """Read the old position-based version of a shelvestate file"""
223 # Order is important, because old shelvestate file uses it
223 # Order is important, because old shelvestate file uses it
224 # to detemine values of fields (i.g. name is on the second line,
224 # to detemine values of fields (i.g. name is on the second line,
225 # originalwctx is on the third and so forth). Please do not change.
225 # originalwctx is on the third and so forth). Please do not change.
226 keys = [
226 keys = [
227 b'version',
227 b'version',
228 b'name',
228 b'name',
229 b'originalwctx',
229 b'originalwctx',
230 b'pendingctx',
230 b'pendingctx',
231 b'parents',
231 b'parents',
232 b'nodestoremove',
232 b'nodestoremove',
233 b'branchtorestore',
233 b'branchtorestore',
234 b'keep',
234 b'keep',
235 b'activebook',
235 b'activebook',
236 ]
236 ]
237 # this is executed only seldomly, so it is not a big deal
237 # this is executed only seldomly, so it is not a big deal
238 # that we open this file twice
238 # that we open this file twice
239 fp = repo.vfs(cls._filename)
239 fp = repo.vfs(cls._filename)
240 d = {}
240 d = {}
241 try:
241 try:
242 for key in keys:
242 for key in keys:
243 d[key] = fp.readline().strip()
243 d[key] = fp.readline().strip()
244 finally:
244 finally:
245 fp.close()
245 fp.close()
246 return d
246 return d
247
247
248 @classmethod
248 @classmethod
249 def load(cls, repo):
249 def load(cls, repo):
250 version = cls._getversion(repo)
250 version = cls._getversion(repo)
251 if version < cls._version:
251 if version < cls._version:
252 d = cls._readold(repo)
252 d = cls._readold(repo)
253 elif version == cls._version:
253 elif version == cls._version:
254 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
254 d = scmutil.simplekeyvaluefile(repo.vfs, cls._filename).read(
255 firstlinenonkeyval=True
255 firstlinenonkeyval=True
256 )
256 )
257 else:
257 else:
258 raise error.Abort(
258 raise error.Abort(
259 _(
259 _(
260 b'this version of shelve is incompatible '
260 b'this version of shelve is incompatible '
261 b'with the version used in this repo'
261 b'with the version used in this repo'
262 )
262 )
263 )
263 )
264
264
265 cls._verifyandtransform(d)
265 cls._verifyandtransform(d)
266 try:
266 try:
267 obj = cls()
267 obj = cls()
268 obj.name = d[b'name']
268 obj.name = d[b'name']
269 obj.wctx = repo[d[b'originalwctx']]
269 obj.wctx = repo[d[b'originalwctx']]
270 obj.pendingctx = repo[d[b'pendingctx']]
270 obj.pendingctx = repo[d[b'pendingctx']]
271 obj.parents = d[b'parents']
271 obj.parents = d[b'parents']
272 obj.nodestoremove = d[b'nodestoremove']
272 obj.nodestoremove = d[b'nodestoremove']
273 obj.branchtorestore = d.get(b'branchtorestore', b'')
273 obj.branchtorestore = d.get(b'branchtorestore', b'')
274 obj.keep = d.get(b'keep') == cls._keep
274 obj.keep = d.get(b'keep') == cls._keep
275 obj.activebookmark = b''
275 obj.activebookmark = b''
276 if d.get(b'activebook', b'') != cls._noactivebook:
276 if d.get(b'activebook', b'') != cls._noactivebook:
277 obj.activebookmark = d.get(b'activebook', b'')
277 obj.activebookmark = d.get(b'activebook', b'')
278 obj.interactive = d.get(b'interactive') == cls._interactive
278 obj.interactive = d.get(b'interactive') == cls._interactive
279 except (error.RepoLookupError, KeyError) as err:
279 except (error.RepoLookupError, KeyError) as err:
280 raise error.CorruptedState(pycompat.bytestr(err))
280 raise error.CorruptedState(pycompat.bytestr(err))
281
281
282 return obj
282 return obj
283
283
284 @classmethod
284 @classmethod
285 def save(
285 def save(
286 cls,
286 cls,
287 repo,
287 repo,
288 name,
288 name,
289 originalwctx,
289 originalwctx,
290 pendingctx,
290 pendingctx,
291 nodestoremove,
291 nodestoremove,
292 branchtorestore,
292 branchtorestore,
293 keep=False,
293 keep=False,
294 activebook=b'',
294 activebook=b'',
295 interactive=False,
295 interactive=False,
296 ):
296 ):
297 info = {
297 info = {
298 b"name": name,
298 b"name": name,
299 b"originalwctx": nodemod.hex(originalwctx.node()),
299 b"originalwctx": nodemod.hex(originalwctx.node()),
300 b"pendingctx": nodemod.hex(pendingctx.node()),
300 b"pendingctx": nodemod.hex(pendingctx.node()),
301 b"parents": b' '.join(
301 b"parents": b' '.join(
302 [nodemod.hex(p) for p in repo.dirstate.parents()]
302 [nodemod.hex(p) for p in repo.dirstate.parents()]
303 ),
303 ),
304 b"nodestoremove": b' '.join(
304 b"nodestoremove": b' '.join(
305 [nodemod.hex(n) for n in nodestoremove]
305 [nodemod.hex(n) for n in nodestoremove]
306 ),
306 ),
307 b"branchtorestore": branchtorestore,
307 b"branchtorestore": branchtorestore,
308 b"keep": cls._keep if keep else cls._nokeep,
308 b"keep": cls._keep if keep else cls._nokeep,
309 b"activebook": activebook or cls._noactivebook,
309 b"activebook": activebook or cls._noactivebook,
310 }
310 }
311 if interactive:
311 if interactive:
312 info[b'interactive'] = cls._interactive
312 info[b'interactive'] = cls._interactive
313 scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
313 scmutil.simplekeyvaluefile(repo.vfs, cls._filename).write(
314 info, firstline=(b"%d" % cls._version)
314 info, firstline=(b"%d" % cls._version)
315 )
315 )
316
316
317 @classmethod
317 @classmethod
318 def clear(cls, repo):
318 def clear(cls, repo):
319 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
319 repo.vfs.unlinkpath(cls._filename, ignoremissing=True)
320
320
321
321
322 def cleanupoldbackups(repo):
322 def cleanupoldbackups(repo):
323 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
323 vfs = vfsmod.vfs(repo.vfs.join(backupdir))
324 maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
324 maxbackups = repo.ui.configint(b'shelve', b'maxbackups')
325 hgfiles = [f for f in vfs.listdir() if f.endswith(b'.' + patchextension)]
325 hgfiles = [f for f in vfs.listdir() if f.endswith(b'.' + patchextension)]
326 hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
326 hgfiles = sorted([(vfs.stat(f)[stat.ST_MTIME], f) for f in hgfiles])
327 if maxbackups > 0 and maxbackups < len(hgfiles):
327 if maxbackups > 0 and maxbackups < len(hgfiles):
328 bordermtime = hgfiles[-maxbackups][0]
328 bordermtime = hgfiles[-maxbackups][0]
329 else:
329 else:
330 bordermtime = None
330 bordermtime = None
331 for mtime, f in hgfiles[: len(hgfiles) - maxbackups]:
331 for mtime, f in hgfiles[: len(hgfiles) - maxbackups]:
332 if mtime == bordermtime:
332 if mtime == bordermtime:
333 # keep it, because timestamp can't decide exact order of backups
333 # keep it, because timestamp can't decide exact order of backups
334 continue
334 continue
335 base = f[: -(1 + len(patchextension))]
335 base = f[: -(1 + len(patchextension))]
336 for ext in shelvefileextensions:
336 for ext in shelvefileextensions:
337 vfs.tryunlink(base + b'.' + ext)
337 vfs.tryunlink(base + b'.' + ext)
338
338
339
339
340 def _backupactivebookmark(repo):
340 def _backupactivebookmark(repo):
341 activebookmark = repo._activebookmark
341 activebookmark = repo._activebookmark
342 if activebookmark:
342 if activebookmark:
343 bookmarks.deactivate(repo)
343 bookmarks.deactivate(repo)
344 return activebookmark
344 return activebookmark
345
345
346
346
347 def _restoreactivebookmark(repo, mark):
347 def _restoreactivebookmark(repo, mark):
348 if mark:
348 if mark:
349 bookmarks.activate(repo, mark)
349 bookmarks.activate(repo, mark)
350
350
351
351
352 def _aborttransaction(repo, tr):
352 def _aborttransaction(repo, tr):
353 '''Abort current transaction for shelve/unshelve, but keep dirstate
353 '''Abort current transaction for shelve/unshelve, but keep dirstate
354 '''
354 '''
355 dirstatebackupname = b'dirstate.shelve'
355 dirstatebackupname = b'dirstate.shelve'
356 repo.dirstate.savebackup(tr, dirstatebackupname)
356 repo.dirstate.savebackup(tr, dirstatebackupname)
357 tr.abort()
357 tr.abort()
358 repo.dirstate.restorebackup(None, dirstatebackupname)
358 repo.dirstate.restorebackup(None, dirstatebackupname)
359
359
360
360
361 def getshelvename(repo, parent, opts):
361 def getshelvename(repo, parent, opts):
362 """Decide on the name this shelve is going to have"""
362 """Decide on the name this shelve is going to have"""
363
363
364 def gennames():
364 def gennames():
365 yield label
365 yield label
366 for i in itertools.count(1):
366 for i in itertools.count(1):
367 yield b'%s-%02d' % (label, i)
367 yield b'%s-%02d' % (label, i)
368
368
369 name = opts.get(b'name')
369 name = opts.get(b'name')
370 label = repo._activebookmark or parent.branch() or b'default'
370 label = repo._activebookmark or parent.branch() or b'default'
371 # slashes aren't allowed in filenames, therefore we rename it
371 # slashes aren't allowed in filenames, therefore we rename it
372 label = label.replace(b'/', b'_')
372 label = label.replace(b'/', b'_')
373 label = label.replace(b'\\', b'_')
373 label = label.replace(b'\\', b'_')
374 # filenames must not start with '.' as it should not be hidden
374 # filenames must not start with '.' as it should not be hidden
375 if label.startswith(b'.'):
375 if label.startswith(b'.'):
376 label = label.replace(b'.', b'_', 1)
376 label = label.replace(b'.', b'_', 1)
377
377
378 if name:
378 if name:
379 if shelvedfile(repo, name, patchextension).exists():
379 if shelvedfile(repo, name, patchextension).exists():
380 e = _(b"a shelved change named '%s' already exists") % name
380 e = _(b"a shelved change named '%s' already exists") % name
381 raise error.Abort(e)
381 raise error.Abort(e)
382
382
383 # ensure we are not creating a subdirectory or a hidden file
383 # ensure we are not creating a subdirectory or a hidden file
384 if b'/' in name or b'\\' in name:
384 if b'/' in name or b'\\' in name:
385 raise error.Abort(
385 raise error.Abort(
386 _(b'shelved change names can not contain slashes')
386 _(b'shelved change names can not contain slashes')
387 )
387 )
388 if name.startswith(b'.'):
388 if name.startswith(b'.'):
389 raise error.Abort(_(b"shelved change names can not start with '.'"))
389 raise error.Abort(_(b"shelved change names can not start with '.'"))
390
390
391 else:
391 else:
392 for n in gennames():
392 for n in gennames():
393 if not shelvedfile(repo, n, patchextension).exists():
393 if not shelvedfile(repo, n, patchextension).exists():
394 name = n
394 name = n
395 break
395 break
396
396
397 return name
397 return name
398
398
399
399
400 def mutableancestors(ctx):
400 def mutableancestors(ctx):
401 """return all mutable ancestors for ctx (included)
401 """return all mutable ancestors for ctx (included)
402
402
403 Much faster than the revset ancestors(ctx) & draft()"""
403 Much faster than the revset ancestors(ctx) & draft()"""
404 seen = {nodemod.nullrev}
404 seen = {nodemod.nullrev}
405 visit = collections.deque()
405 visit = collections.deque()
406 visit.append(ctx)
406 visit.append(ctx)
407 while visit:
407 while visit:
408 ctx = visit.popleft()
408 ctx = visit.popleft()
409 yield ctx.node()
409 yield ctx.node()
410 for parent in ctx.parents():
410 for parent in ctx.parents():
411 rev = parent.rev()
411 rev = parent.rev()
412 if rev not in seen:
412 if rev not in seen:
413 seen.add(rev)
413 seen.add(rev)
414 if parent.mutable():
414 if parent.mutable():
415 visit.append(parent)
415 visit.append(parent)
416
416
417
417
def getcommitfunc(extra, interactive, editor=False):
    """Return a commit callable for use with cmdutil.commit/dorecord.

    The returned function commits with the given ``extra`` metadata at
    internal phase (secret when internal phase is unsupported), and
    temporarily disables mq's "patches applied" check so shelving works
    while mq patches are applied.  When ``interactive`` is true a
    *pats/**opts adapter matching dorecord's expectations is returned.
    """

    def commitfunc(ui, repo, message, match, opts):
        # mq refuses to commit while patches are applied; suspend its
        # check for the duration of this commit.
        hasmq = util.safehasattr(repo, b'mq')
        if hasmq:
            saved, repo.mq.checkapplied = repo.mq.checkapplied, False

        # Shelve commits are internal bookkeeping: hide them from
        # exchange via the internal phase when supported, secret otherwise.
        targetphase = phases.internal
        if not phases.supportinternal(repo):
            targetphase = phases.secret
        overrides = {(b'phases', b'new-commit'): targetphase}
        try:
            editor_ = False
            if editor:
                editor_ = cmdutil.getcommiteditor(
                    editform=b'shelve.shelve', **pycompat.strkwargs(opts)
                )
            with repo.ui.configoverride(overrides):
                return repo.commit(
                    message,
                    shelveuser,
                    opts.get(b'date'),
                    match,
                    editor=editor_,
                    extra=extra,
                )
        finally:
            # always restore mq's check, even if the commit raised
            if hasmq:
                repo.mq.checkapplied = saved

    def interactivecommitfunc(ui, repo, *pats, **opts):
        # adapter: dorecord passes pats/**opts, commitfunc wants match/opts
        opts = pycompat.byteskwargs(opts)
        match = scmutil.match(repo[b'.'], pats, {})
        message = opts[b'message']
        return commitfunc(ui, repo, message, match, opts)

    return interactivecommitfunc if interactive else commitfunc
454
454
455
455
def _nothingtoshelvemessaging(ui, repo, pats, opts):
    """Tell the user why there was nothing to shelve.

    Mentions locally-deleted files explicitly, since those are a common
    reason a shelve turns out empty.
    """
    matcher = scmutil.match(repo[None], pats, opts)
    st = repo.status(match=matcher)
    if not st.deleted:
        ui.status(_(b"nothing changed\n"))
        return
    ui.status(
        _(b"nothing changed (%d missing files, see 'hg status')\n")
        % len(st.deleted)
    )
465
465
466
466
def _shelvecreatedcommit(repo, node, name, match):
    """Write the on-disk artifacts for a freshly created shelve commit.

    Records the commit node in the ``.shelve`` info file, bundles the
    commit with its mutable ancestors into the ``.hg`` bundle, and
    exports a git-style patch restricted to ``match``.
    """
    info = {b'node': nodemod.hex(node)}
    shelvedfile(repo, name, b'shelve').writeinfo(info)
    # bundle bases: all mutable ancestors, so the bundle applies cleanly
    bases = list(mutableancestors(repo[node]))
    shelvedfile(repo, name, b'hg').writebundle(bases, node)
    with shelvedfile(repo, name, patchextension).opener(b'wb') as fp:
        cmdutil.exportfile(
            repo, [node], fp, opts=mdiff.diffopts(git=True), match=match
        )
476
476
477
477
def _includeunknownfiles(repo, pats, opts, extra):
    """Stage unknown (untracked) files so they are shelved too.

    The added file names are recorded NUL-separated in ``extra`` so that
    unshelve can restore their unknown status later.
    """
    matcher = scmutil.match(repo[None], pats, opts)
    unknown = repo.status(match=matcher, unknown=True).unknown
    if not unknown:
        return
    extra[b'shelve_unknown'] = b'\0'.join(unknown)
    repo[None].add(unknown)
483
483
484
484
def _finishshelve(repo, tr):
    """Finish the shelve transaction.

    With internal-phase support the shelve commit can safely stay in the
    repo, so the transaction is committed.  Otherwise the transaction is
    rolled back so the temporary commit never becomes visible (the
    bundle and patch were already written to disk).
    """
    if phases.supportinternal(repo):
        tr.close()
    else:
        _aborttransaction(repo, tr)
490
490
491
491
def createcmd(ui, repo, pats, opts):
    """subcommand that creates a new shelve

    Takes the wlock and refuses to shelve while another multi-step
    operation (rebase, graft, ...) is unfinished.
    """
    with repo.wlock():
        cmdutil.checkunfinished(repo)
        return _docreatecmd(ui, repo, pats, opts)
497
497
498
498
def _docreatecmd(ui, repo, pats, opts):
    """Create a new shelve from the working copy.

    Caller holds the wlock.  Commits the pending changes as a temporary
    (internal/secret) commit, writes the shelve files, then restores the
    working copy to the parent.  Returns 1 when there was nothing to
    shelve, None otherwise.
    """
    wctx = repo[None]
    parents = wctx.parents()
    parent = parents[0]
    origbranch = wctx.branch()

    # default shelve description: first line of the parent's description
    if parent.node() != nodemod.nullid:
        desc = b"changes to: %s" % parent.description().split(b'\n', 1)[0]
    else:
        desc = b'(changes in empty repository)'

    if not opts.get(b'message'):
        opts[b'message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction(b'shelve', report=lambda x: None)

        interactive = opts.get(b'interactive', False)
        # --addremove already schedules unknown files; don't add twice
        includeunknown = opts.get(b'unknown', False) and not opts.get(
            b'addremove', False
        )

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {b'internal': b'shelve'}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo[b'.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(
                ui,
                repo,
                commitfunc,
                None,
                False,
                cmdutil.recordfilter,
                *pats,
                **pycompat.strkwargs(opts)
            )
        if not node:
            # nothing was committed: explain why and signal failure
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        # Create a matcher so that prefetch doesn't attempt to fetch
        # the entire repository pointlessly, and as an optimisation
        # for movedirstate, if needed.
        match = scmutil.matchfiles(repo, repo[node].files())
        _shelvecreatedcommit(repo, node, name, match)

        ui.status(_(b'shelved as %s\n') % name)
        if opts[b'keep']:
            # --keep: leave the changes in the working copy, only move
            # the dirstate parent back
            with repo.dirstate.parentchange():
                scmutil.movedirstate(repo, parent, match)
        else:
            hg.update(repo, parent.node())
        if origbranch != repo[b'.'].branch() and not _isbareshelve(pats, opts):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo, tr)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)
574
574
575
575
576 def _isbareshelve(pats, opts):
576 def _isbareshelve(pats, opts):
577 return (
577 return (
578 not pats
578 not pats
579 and not opts.get(b'interactive', False)
579 and not opts.get(b'interactive', False)
580 and not opts.get(b'include', False)
580 and not opts.get(b'include', False)
581 and not opts.get(b'exclude', False)
581 and not opts.get(b'exclude', False)
582 )
582 )
583
583
584
584
585 def _iswctxonnewbranch(repo):
585 def _iswctxonnewbranch(repo):
586 return repo[None].branch() != repo[b'.'].branch()
586 return repo[None].branch() != repo[b'.'].branch()
587
587
588
588
def cleanupcmd(ui, repo):
    """subcommand that deletes all shelves

    Shelve files are not removed outright: they are moved to the backup
    area, and old backups are then trimmed.
    """

    with repo.wlock():
        for (name, _type) in repo.vfs.readdir(shelvedir):
            suffix = name.rsplit(b'.', 1)[-1]
            if suffix in shelvefileextensions:
                shelvedfile(repo, name).movetobackup()
            cleanupoldbackups(repo)
598
598
599
599
def deletecmd(ui, repo, pats):
    """subcommand that deletes a specific shelve

    ``pats`` is the list of shelve names to delete; aborts if empty or
    if a named shelve does not exist.  Files are moved to the backup
    area rather than deleted.
    """
    if not pats:
        raise error.Abort(_(b'no shelved changes specified!'))
    with repo.wlock():
        for name in pats:
            try:
                for suffix in shelvefileextensions:
                    shfile = shelvedfile(repo, name, suffix)
                    # patch file is necessary, as it should
                    # be present for any kind of shelve,
                    # but the .hg file is optional as in future we
                    # will add obsolete shelve with does not create a
                    # bundle
                    if shfile.exists() or suffix == patchextension:
                        shfile.movetobackup()
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
                # ENOENT on the mandatory patch file: no such shelve
                raise error.Abort(_(b"shelved change '%s' not found") % name)
        cleanupoldbackups(repo)
621
621
622
622
def listshelves(repo):
    """return all shelves in repo as list of (time, filename)

    Sorted newest first.  Only entries with the patch extension are
    considered; an absent shelve directory yields an empty list.
    """
    try:
        names = repo.vfs.readdir(shelvedir)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        # no shelve directory yet: there are no shelves
        return []
    info = []
    for (name, _type) in names:
        pfx, sfx = name.rsplit(b'.', 1)
        if not pfx or sfx != patchextension:
            continue
        st = shelvedfile(repo, name).stat()
        info.append((st[stat.ST_MTIME], shelvedfile(repo, pfx).filename()))
    return sorted(info, reverse=True)
639
639
640
640
def listcmd(ui, repo, pats, opts):
    """subcommand that displays the list of shelves

    ``pats`` optionally restricts output to the named shelves.  Each
    line shows name, age and the first description line; with --patch
    or --stat the patch body / diffstat follows.
    """
    pats = set(pats)
    width = 80
    if not ui.plain():
        width = ui.termwidth()
    namelabel = b'shelve.newest'
    ui.pager(b'shelve')
    for mtime, name in listshelves(repo):
        sname = util.split(name)[1]
        if pats and sname not in pats:
            continue
        ui.write(sname, label=namelabel)
        # only the first (newest) entry gets the 'newest' label
        namelabel = b'shelve.name'
        if ui.quiet:
            ui.write(b'\n')
            continue
        ui.write(b' ' * (16 - len(sname)))
        used = 16
        date = dateutil.makedate(mtime)
        age = b'(%s)' % templatefilters.age(date, abbrev=True)
        ui.write(age, label=b'shelve.age')
        ui.write(b' ' * (12 - len(age)))
        used += 12
        with open(name + b'.' + patchextension, b'rb') as fp:
            while True:
                line = fp.readline()
                if not line:
                    break
                if not line.startswith(b'#'):
                    # first non-header line is the description
                    desc = line.rstrip()
                    if ui.formatted():
                        desc = stringutil.ellipsis(desc, width - used)
                    ui.write(desc)
                    break
            ui.write(b'\n')
            if not (opts[b'patch'] or opts[b'stat']):
                continue
            # fp is positioned after the description: rest is the diff
            difflines = fp.readlines()
            if opts[b'patch']:
                for chunk, label in patch.difflabel(iter, difflines):
                    ui.write(chunk, label=label)
            if opts[b'stat']:
                for chunk, label in patch.diffstatui(difflines, width=width):
                    ui.write(chunk, label=label)
686
686
687
687
def patchcmds(ui, repo, pats, opts):
    """subcommand that displays shelves

    With no names given, shows the most recent shelve.  Aborts when a
    named shelve (or any shelve at all) does not exist, then delegates
    the actual display to listcmd.
    """
    if len(pats) == 0:
        shelves = listshelves(repo)
        if not shelves:
            raise error.Abort(_(b"there are no shelves to show"))
        mtime, name = shelves[0]
        sname = util.split(name)[1]
        pats = [sname]

    # validate every requested shelve before printing anything
    for shelfname in pats:
        if not shelvedfile(repo, shelfname, patchextension).exists():
            raise error.Abort(_(b"cannot find shelf %s") % shelfname)

    listcmd(ui, repo, pats, opts)
703
703
704
704
def checkparents(repo, state):
    """check parent while resuming an unshelve

    Aborts when the dirstate parents have changed since the unshelve
    state was written (i.e. the user updated in the meantime).
    """
    current = repo.dirstate.parents()
    if current == state.parents:
        return
    raise error.Abort(
        _(b'working directory parents do not match unshelve state')
    )
711
711
712
712
def _loadshelvedstate(ui, repo, opts):
    """Load and validate the persisted unshelve state file.

    Returns the loaded state.  Aborts with a helpful hint when the state
    file is missing (nothing to continue) or corrupted; for a corrupted
    file, --abort additionally clears the stale state.
    """
    try:
        state = shelvedstate.load(repo)
        # inherit --keep from the original unshelve unless overridden
        if opts.get(b'keep') is None:
            opts[b'keep'] = state.keep
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        cmdutil.wrongtooltocontinue(repo, _(b'unshelve'))
    except error.CorruptedState as err:
        ui.debug(pycompat.bytestr(err) + b'\n')
        if opts.get(b'continue'):
            msg = _(b'corrupted shelved state file')
            hint = _(
                b'please run hg unshelve --abort to abort unshelve '
                b'operation'
            )
            raise error.Abort(msg, hint=hint)
        elif opts.get(b'abort'):
            shelvedstate.clear(repo)
            raise error.Abort(
                _(
                    b'could not read shelved state file, your '
                    b'working copy may be in an unexpected state\n'
                    b'please update to some commit\n'
                )
            )
    return state
741
741
742
742
def unshelveabort(ui, repo, state):
    """subcommand that abort an in-progress unshelve

    Restores the pre-unshelve working copy and bookmark, strips the
    temporary commits (unless internal phase keeps them hidden), and
    clears the unshelve state file even when restoration fails.
    """
    with repo.lock():
        try:
            checkparents(repo, state)

            merge.clean_update(state.pendingctx)
            if state.activebookmark and state.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, state.activebookmark)
            mergefiles(ui, repo, state.wctx, state.pendingctx)
            if not phases.supportinternal(repo):
                repair.strip(
                    ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
                )
        finally:
            # always drop the state file so the user is not stuck
            shelvedstate.clear(repo)
            ui.warn(_(b"unshelve of '%s' aborted\n") % state.name)
760
760
761
761
def hgabortunshelve(ui, repo):
    """logic to abort unshelve using 'hg abort"""
    abortopts = {b'abort': True}
    with repo.wlock():
        unshelvestate = _loadshelvedstate(ui, repo, abortopts)
        return unshelveabort(ui, repo, unshelvestate)
767
767
768
768
def mergefiles(ui, repo, wctx, shelvectx):
    """updates to wctx and merges the changes from shelvectx into the
    dirstate.

    Runs quietly; revert output is captured and discarded so only
    shelve-level messages reach the user.
    """
    with ui.configoverride({(b'ui', b'quiet'): True}):
        hg.update(repo, wctx.node())
        ui.pushbuffer(True)
        cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents())
        ui.popbuffer()
777
777
778
778
def restorebranch(ui, repo, branchtorestore):
    """Restore the working directory branch recorded at shelve time.

    No-op when no branch was recorded or it is already current.
    """
    if not branchtorestore:
        return
    if branchtorestore == repo.dirstate.branch():
        return
    repo.dirstate.setbranch(branchtorestore)
    ui.status(
        _(b'marked working directory as branch %s\n') % branchtorestore
    )
785
785
786
786
def unshelvecleanup(ui, repo, name, opts):
    """remove related files after an unshelve

    Honors --keep: with it, the shelve files stay in place.  Otherwise
    every existing shelve file is moved to the backup area and old
    backups are trimmed.
    """
    if opts.get(b'keep'):
        return
    for filetype in shelvefileextensions:
        candidate = shelvedfile(repo, name, filetype)
        if candidate.exists():
            candidate.movetobackup()
    cleanupoldbackups(repo)
795
795
796
796
def unshelvecontinue(ui, repo, state, opts):
    """subcommand to continue an in-progress unshelve

    Requires all merge conflicts to be resolved.  Re-commits the merged
    result as the unshelved changeset, strips the temporary commits, and
    finishes bookkeeping (state file, bookmark, shelve files).
    """
    # We're finishing off a merge. First parent is our original
    # parent, second is the temporary "fake" commit we're unshelving.
    interactive = state.interactive
    basename = state.name
    with repo.lock():
        checkparents(repo, state)
        ms = mergestatemod.mergestate.read(repo)
        if list(ms.unresolved()):
            raise error.Abort(
                _(b"unresolved conflicts, can't continue"),
                hint=_(b"see 'hg resolve', then 'hg unshelve --continue'"),
            )

        shelvectx = repo[state.parents[1]]
        pendingctx = state.pendingctx

        # drop the second (temporary) parent before committing
        with repo.dirstate.parentchange():
            repo.setparents(state.pendingctx.node(), nodemod.nullid)
            repo.dirstate.write(repo.currenttransaction())

        targetphase = phases.internal
        if not phases.supportinternal(repo):
            targetphase = phases.secret
        overrides = {(b'phases', b'new-commit'): targetphase}
        with repo.ui.configoverride(overrides, b'unshelve'):
            with repo.dirstate.parentchange():
                repo.setparents(state.parents[0], nodemod.nullid)
                newnode, ispartialunshelve = _createunshelvectx(
                    ui, repo, shelvectx, basename, interactive, opts
                )

        if newnode is None:
            # commit was a no-op: the changes were already present
            shelvectx = state.pendingctx
            msg = _(
                b'note: unshelved changes already existed '
                b'in the working copy\n'
            )
            ui.status(msg)
        else:
            # only strip the shelvectx if we produced one
            state.nodestoremove.append(newnode)
            shelvectx = repo[newnode]

        hg.updaterepo(repo, pendingctx.node(), overwrite=False)
        mergefiles(ui, repo, state.wctx, shelvectx)
        restorebranch(ui, repo, state.branchtorestore)

        if not phases.supportinternal(repo):
            repair.strip(
                ui, repo, state.nodestoremove, backup=False, topic=b'shelve'
            )
        shelvedstate.clear(repo)
        if not ispartialunshelve:
            unshelvecleanup(ui, repo, state.name, opts)
        _restoreactivebookmark(repo, state.activebookmark)
        ui.status(_(b"unshelve of '%s' complete\n") % state.name)
855
855
856
856
def hgcontinueunshelve(ui, repo):
    """logic to resume unshelve using 'hg continue'"""
    continueopts = {b'continue': True}
    with repo.wlock():
        unshelvestate = _loadshelvedstate(ui, repo, continueopts)
        keepopts = {b'keep': unshelvestate.keep}
        return unshelvecontinue(ui, repo, unshelvestate, keepopts)
862
862
863
863
def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
    """Temporarily commit working copy changes before moving unshelve commit

    Returns ``(tmpwctx, addedbefore)`` where ``tmpwctx`` is the context
    the unshelved commit will be rebased onto (the temporary commit when
    pending changes existed, otherwise the input unchanged) and
    ``addedbefore`` is the frozenset of files already in 'added' state.
    """
    # Store pending changes in a commit and remember added in case a shelve
    # contains unknown files that are part of the pending change
    s = repo.status()
    addedbefore = frozenset(s.added)
    if not (s.modified or s.added or s.removed):
        # clean working copy: nothing to park
        return tmpwctx, addedbefore
    ui.status(
        _(
            b"temporarily committing pending changes "
            b"(restore with 'hg unshelve --abort')\n"
        )
    )
    extra = {b'internal': b'shelve'}
    commitfunc = getcommitfunc(extra=extra, interactive=False, editor=False)
    tempopts = {}
    tempopts[b'message'] = b"pending changes temporary commit"
    tempopts[b'date'] = opts.get(b'date')
    with ui.configoverride({(b'ui', b'quiet'): True}):
        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
    tmpwctx = repo[node]
    return tmpwctx, addedbefore
887
887
888
888
def _unshelverestorecommit(ui, repo, tr, basename):
    """Recreate commit in the repository during the unshelve

    Prefers the node recorded in the ``.shelve`` info file when it is
    still present in the repo; otherwise re-applies the bundle.  Returns
    ``(unfiltered repo, shelvectx)``.
    """
    repo = repo.unfiltered()
    node = None
    if shelvedfile(repo, basename, b'shelve').exists():
        node = shelvedfile(repo, basename, b'shelve').readinfo()[b'node']
    if node is None or node not in repo:
        with ui.configoverride({(b'ui', b'quiet'): True}):
            shelvectx = shelvedfile(repo, basename, b'hg').applybundle(tr)
        # We might not strip the unbundled changeset, so we should keep track of
        # the unshelve node in case we need to reuse it (eg: unshelve --keep)
        if node is None:
            info = {b'node': nodemod.hex(shelvectx.node())}
            shelvedfile(repo, basename, b'shelve').writeinfo(info)
    else:
        shelvectx = repo[node]

    return repo, shelvectx
907
907
908
908
def _createunshelvectx(ui, repo, shelvectx, basename, interactive, opts):
    """Handles the creation of unshelve commit and updates the shelve if it
    was partially unshelved.

    If interactive is:

    * False: Commits all the changes in the working directory.
    * True: Prompts the user to select changes to unshelve and commit them.
      Update the shelve with remaining changes.

    Returns the node of the new commit formed and a bool indicating whether
    the shelve was partially unshelved.Creates a commit ctx to unshelve
    interactively or non-interactively.

    The user might want to unshelve certain changes only from the stored
    shelve in interactive. So, we would create two commits. One with requested
    changes to unshelve at that time and the latter is shelved for future.

    Here, we return both the newnode which is created interactively and a
    bool to know whether the shelve is partly done or completely done.
    """
    opts[b'message'] = shelvectx.description()
    opts[b'interactive-unshelve'] = True
    pats = []
    if not interactive:
        # non-interactive: commit everything, reusing the shelve's metadata
        newnode = repo.commit(
            text=shelvectx.description(),
            extra=shelvectx.extra(),
            user=shelvectx.user(),
            date=shelvectx.date(),
        )
        return newnode, False

    # interactive: record the selected hunks as the unshelve commit ...
    commitfunc = getcommitfunc(shelvectx.extra(), interactive=True, editor=True)
    newnode = cmdutil.dorecord(
        ui,
        repo,
        commitfunc,
        None,
        False,
        cmdutil.recordfilter,
        *pats,
        **pycompat.strkwargs(opts)
    )
    # ... then commit whatever remains and re-shelve it for later
    snode = repo.commit(
        text=shelvectx.description(),
        extra=shelvectx.extra(),
        user=shelvectx.user(),
    )
    if snode:
        m = scmutil.matchfiles(repo, repo[snode].files())
        _shelvecreatedcommit(repo, snode, basename, m)

    return newnode, bool(snode)
963
963
964
964
965 def _rebaserestoredcommit(
965 def _rebaserestoredcommit(
966 ui,
966 ui,
967 repo,
967 repo,
968 opts,
968 opts,
969 tr,
969 tr,
970 oldtiprev,
970 oldtiprev,
971 basename,
971 basename,
972 pctx,
972 pctx,
973 tmpwctx,
973 tmpwctx,
974 shelvectx,
974 shelvectx,
975 branchtorestore,
975 branchtorestore,
976 activebookmark,
976 activebookmark,
977 ):
977 ):
978 """Rebase restored commit from its original location to a destination"""
978 """Rebase restored commit from its original location to a destination"""
979 # If the shelve is not immediately on top of the commit
979 # If the shelve is not immediately on top of the commit
980 # we'll be merging with, rebase it to be on top.
980 # we'll be merging with, rebase it to be on top.
981 interactive = opts.get(b'interactive')
981 interactive = opts.get(b'interactive')
982 if tmpwctx.node() == shelvectx.p1().node() and not interactive:
982 if tmpwctx.node() == shelvectx.p1().node() and not interactive:
983 # We won't skip on interactive mode because, the user might want to
983 # We won't skip on interactive mode because, the user might want to
984 # unshelve certain changes only.
984 # unshelve certain changes only.
985 return shelvectx, False
985 return shelvectx, False
986
986
987 overrides = {
987 overrides = {
988 (b'ui', b'forcemerge'): opts.get(b'tool', b''),
988 (b'ui', b'forcemerge'): opts.get(b'tool', b''),
989 (b'phases', b'new-commit'): phases.secret,
989 (b'phases', b'new-commit'): phases.secret,
990 }
990 }
991 with repo.ui.configoverride(overrides, b'unshelve'):
991 with repo.ui.configoverride(overrides, b'unshelve'):
992 ui.status(_(b'rebasing shelved changes\n'))
992 ui.status(_(b'rebasing shelved changes\n'))
993 stats = merge.graft(
993 stats = merge.graft(
994 repo,
994 repo,
995 shelvectx,
995 shelvectx,
996 labels=[b'working-copy', b'shelve'],
996 labels=[b'working-copy', b'shelve'],
997 keepconflictparent=True,
997 keepconflictparent=True,
998 )
998 )
999 if stats.unresolvedcount:
999 if stats.unresolvedcount:
1000 tr.close()
1000 tr.close()
1001
1001
1002 nodestoremove = [
1002 nodestoremove = [
1003 repo.changelog.node(rev)
1003 repo.changelog.node(rev)
1004 for rev in pycompat.xrange(oldtiprev, len(repo))
1004 for rev in pycompat.xrange(oldtiprev, len(repo))
1005 ]
1005 ]
1006 shelvedstate.save(
1006 shelvedstate.save(
1007 repo,
1007 repo,
1008 basename,
1008 basename,
1009 pctx,
1009 pctx,
1010 tmpwctx,
1010 tmpwctx,
1011 nodestoremove,
1011 nodestoremove,
1012 branchtorestore,
1012 branchtorestore,
1013 opts.get(b'keep'),
1013 opts.get(b'keep'),
1014 activebookmark,
1014 activebookmark,
1015 interactive,
1015 interactive,
1016 )
1016 )
1017 raise error.InterventionRequired(
1017 raise error.ConflictResolutionRequired(b'unshelve')
1018 _(
1019 b"unresolved conflicts (see 'hg resolve', then "
1020 b"'hg unshelve --continue')"
1021 )
1022 )
1023
1018
1024 with repo.dirstate.parentchange():
1019 with repo.dirstate.parentchange():
1025 repo.setparents(tmpwctx.node(), nodemod.nullid)
1020 repo.setparents(tmpwctx.node(), nodemod.nullid)
1026 newnode, ispartialunshelve = _createunshelvectx(
1021 newnode, ispartialunshelve = _createunshelvectx(
1027 ui, repo, shelvectx, basename, interactive, opts
1022 ui, repo, shelvectx, basename, interactive, opts
1028 )
1023 )
1029
1024
1030 if newnode is None:
1025 if newnode is None:
1031 shelvectx = tmpwctx
1026 shelvectx = tmpwctx
1032 msg = _(
1027 msg = _(
1033 b'note: unshelved changes already existed '
1028 b'note: unshelved changes already existed '
1034 b'in the working copy\n'
1029 b'in the working copy\n'
1035 )
1030 )
1036 ui.status(msg)
1031 ui.status(msg)
1037 else:
1032 else:
1038 shelvectx = repo[newnode]
1033 shelvectx = repo[newnode]
1039 hg.updaterepo(repo, tmpwctx.node(), False)
1034 hg.updaterepo(repo, tmpwctx.node(), False)
1040
1035
1041 return shelvectx, ispartialunshelve
1036 return shelvectx, ispartialunshelve
1042
1037
1043
1038
1044 def _forgetunknownfiles(repo, shelvectx, addedbefore):
1039 def _forgetunknownfiles(repo, shelvectx, addedbefore):
1045 # Forget any files that were unknown before the shelve, unknown before
1040 # Forget any files that were unknown before the shelve, unknown before
1046 # unshelve started, but are now added.
1041 # unshelve started, but are now added.
1047 shelveunknown = shelvectx.extra().get(b'shelve_unknown')
1042 shelveunknown = shelvectx.extra().get(b'shelve_unknown')
1048 if not shelveunknown:
1043 if not shelveunknown:
1049 return
1044 return
1050 shelveunknown = frozenset(shelveunknown.split(b'\0'))
1045 shelveunknown = frozenset(shelveunknown.split(b'\0'))
1051 addedafter = frozenset(repo.status().added)
1046 addedafter = frozenset(repo.status().added)
1052 toforget = (addedafter & shelveunknown) - addedbefore
1047 toforget = (addedafter & shelveunknown) - addedbefore
1053 repo[None].forget(toforget)
1048 repo[None].forget(toforget)
1054
1049
1055
1050
1056 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
1051 def _finishunshelve(repo, oldtiprev, tr, activebookmark):
1057 _restoreactivebookmark(repo, activebookmark)
1052 _restoreactivebookmark(repo, activebookmark)
1058 # The transaction aborting will strip all the commits for us,
1053 # The transaction aborting will strip all the commits for us,
1059 # but it doesn't update the inmemory structures, so addchangegroup
1054 # but it doesn't update the inmemory structures, so addchangegroup
1060 # hooks still fire and try to operate on the missing commits.
1055 # hooks still fire and try to operate on the missing commits.
1061 # Clean up manually to prevent this.
1056 # Clean up manually to prevent this.
1062 repo.unfiltered().changelog.strip(oldtiprev, tr)
1057 repo.unfiltered().changelog.strip(oldtiprev, tr)
1063 _aborttransaction(repo, tr)
1058 _aborttransaction(repo, tr)
1064
1059
1065
1060
1066 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
1061 def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
1067 """Check potential problems which may result from working
1062 """Check potential problems which may result from working
1068 copy having untracked changes."""
1063 copy having untracked changes."""
1069 wcdeleted = set(repo.status().deleted)
1064 wcdeleted = set(repo.status().deleted)
1070 shelvetouched = set(shelvectx.files())
1065 shelvetouched = set(shelvectx.files())
1071 intersection = wcdeleted.intersection(shelvetouched)
1066 intersection = wcdeleted.intersection(shelvetouched)
1072 if intersection:
1067 if intersection:
1073 m = _(b"shelved change touches missing files")
1068 m = _(b"shelved change touches missing files")
1074 hint = _(b"run hg status to see which files are missing")
1069 hint = _(b"run hg status to see which files are missing")
1075 raise error.Abort(m, hint=hint)
1070 raise error.Abort(m, hint=hint)
1076
1071
1077
1072
1078 def unshelvecmd(ui, repo, *shelved, **opts):
1073 def unshelvecmd(ui, repo, *shelved, **opts):
1079 opts = pycompat.byteskwargs(opts)
1074 opts = pycompat.byteskwargs(opts)
1080 abortf = opts.get(b'abort')
1075 abortf = opts.get(b'abort')
1081 continuef = opts.get(b'continue')
1076 continuef = opts.get(b'continue')
1082 interactive = opts.get(b'interactive')
1077 interactive = opts.get(b'interactive')
1083 if not abortf and not continuef:
1078 if not abortf and not continuef:
1084 cmdutil.checkunfinished(repo)
1079 cmdutil.checkunfinished(repo)
1085 shelved = list(shelved)
1080 shelved = list(shelved)
1086 if opts.get(b"name"):
1081 if opts.get(b"name"):
1087 shelved.append(opts[b"name"])
1082 shelved.append(opts[b"name"])
1088
1083
1089 if interactive and opts.get(b'keep'):
1084 if interactive and opts.get(b'keep'):
1090 raise error.Abort(_(b'--keep on --interactive is not yet supported'))
1085 raise error.Abort(_(b'--keep on --interactive is not yet supported'))
1091 if abortf or continuef:
1086 if abortf or continuef:
1092 if abortf and continuef:
1087 if abortf and continuef:
1093 raise error.Abort(_(b'cannot use both abort and continue'))
1088 raise error.Abort(_(b'cannot use both abort and continue'))
1094 if shelved:
1089 if shelved:
1095 raise error.Abort(
1090 raise error.Abort(
1096 _(
1091 _(
1097 b'cannot combine abort/continue with '
1092 b'cannot combine abort/continue with '
1098 b'naming a shelved change'
1093 b'naming a shelved change'
1099 )
1094 )
1100 )
1095 )
1101 if abortf and opts.get(b'tool', False):
1096 if abortf and opts.get(b'tool', False):
1102 ui.warn(_(b'tool option will be ignored\n'))
1097 ui.warn(_(b'tool option will be ignored\n'))
1103
1098
1104 state = _loadshelvedstate(ui, repo, opts)
1099 state = _loadshelvedstate(ui, repo, opts)
1105 if abortf:
1100 if abortf:
1106 return unshelveabort(ui, repo, state)
1101 return unshelveabort(ui, repo, state)
1107 elif continuef and interactive:
1102 elif continuef and interactive:
1108 raise error.Abort(_(b'cannot use both continue and interactive'))
1103 raise error.Abort(_(b'cannot use both continue and interactive'))
1109 elif continuef:
1104 elif continuef:
1110 return unshelvecontinue(ui, repo, state, opts)
1105 return unshelvecontinue(ui, repo, state, opts)
1111 elif len(shelved) > 1:
1106 elif len(shelved) > 1:
1112 raise error.Abort(_(b'can only unshelve one change at a time'))
1107 raise error.Abort(_(b'can only unshelve one change at a time'))
1113 elif not shelved:
1108 elif not shelved:
1114 shelved = listshelves(repo)
1109 shelved = listshelves(repo)
1115 if not shelved:
1110 if not shelved:
1116 raise error.Abort(_(b'no shelved changes to apply!'))
1111 raise error.Abort(_(b'no shelved changes to apply!'))
1117 basename = util.split(shelved[0][1])[1]
1112 basename = util.split(shelved[0][1])[1]
1118 ui.status(_(b"unshelving change '%s'\n") % basename)
1113 ui.status(_(b"unshelving change '%s'\n") % basename)
1119 else:
1114 else:
1120 basename = shelved[0]
1115 basename = shelved[0]
1121
1116
1122 if not shelvedfile(repo, basename, patchextension).exists():
1117 if not shelvedfile(repo, basename, patchextension).exists():
1123 raise error.Abort(_(b"shelved change '%s' not found") % basename)
1118 raise error.Abort(_(b"shelved change '%s' not found") % basename)
1124
1119
1125 return _dounshelve(ui, repo, basename, opts)
1120 return _dounshelve(ui, repo, basename, opts)
1126
1121
1127
1122
1128 def _dounshelve(ui, repo, basename, opts):
1123 def _dounshelve(ui, repo, basename, opts):
1129 repo = repo.unfiltered()
1124 repo = repo.unfiltered()
1130 lock = tr = None
1125 lock = tr = None
1131 try:
1126 try:
1132 lock = repo.lock()
1127 lock = repo.lock()
1133 tr = repo.transaction(b'unshelve', report=lambda x: None)
1128 tr = repo.transaction(b'unshelve', report=lambda x: None)
1134 oldtiprev = len(repo)
1129 oldtiprev = len(repo)
1135
1130
1136 pctx = repo[b'.']
1131 pctx = repo[b'.']
1137 tmpwctx = pctx
1132 tmpwctx = pctx
1138 # The goal is to have a commit structure like so:
1133 # The goal is to have a commit structure like so:
1139 # ...-> pctx -> tmpwctx -> shelvectx
1134 # ...-> pctx -> tmpwctx -> shelvectx
1140 # where tmpwctx is an optional commit with the user's pending changes
1135 # where tmpwctx is an optional commit with the user's pending changes
1141 # and shelvectx is the unshelved changes. Then we merge it all down
1136 # and shelvectx is the unshelved changes. Then we merge it all down
1142 # to the original pctx.
1137 # to the original pctx.
1143
1138
1144 activebookmark = _backupactivebookmark(repo)
1139 activebookmark = _backupactivebookmark(repo)
1145 tmpwctx, addedbefore = _commitworkingcopychanges(
1140 tmpwctx, addedbefore = _commitworkingcopychanges(
1146 ui, repo, opts, tmpwctx
1141 ui, repo, opts, tmpwctx
1147 )
1142 )
1148 repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
1143 repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
1149 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
1144 _checkunshelveuntrackedproblems(ui, repo, shelvectx)
1150 branchtorestore = b''
1145 branchtorestore = b''
1151 if shelvectx.branch() != shelvectx.p1().branch():
1146 if shelvectx.branch() != shelvectx.p1().branch():
1152 branchtorestore = shelvectx.branch()
1147 branchtorestore = shelvectx.branch()
1153
1148
1154 shelvectx, ispartialunshelve = _rebaserestoredcommit(
1149 shelvectx, ispartialunshelve = _rebaserestoredcommit(
1155 ui,
1150 ui,
1156 repo,
1151 repo,
1157 opts,
1152 opts,
1158 tr,
1153 tr,
1159 oldtiprev,
1154 oldtiprev,
1160 basename,
1155 basename,
1161 pctx,
1156 pctx,
1162 tmpwctx,
1157 tmpwctx,
1163 shelvectx,
1158 shelvectx,
1164 branchtorestore,
1159 branchtorestore,
1165 activebookmark,
1160 activebookmark,
1166 )
1161 )
1167 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
1162 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
1168 with ui.configoverride(overrides, b'unshelve'):
1163 with ui.configoverride(overrides, b'unshelve'):
1169 mergefiles(ui, repo, pctx, shelvectx)
1164 mergefiles(ui, repo, pctx, shelvectx)
1170 restorebranch(ui, repo, branchtorestore)
1165 restorebranch(ui, repo, branchtorestore)
1171 shelvedstate.clear(repo)
1166 shelvedstate.clear(repo)
1172 _finishunshelve(repo, oldtiprev, tr, activebookmark)
1167 _finishunshelve(repo, oldtiprev, tr, activebookmark)
1173 _forgetunknownfiles(repo, shelvectx, addedbefore)
1168 _forgetunknownfiles(repo, shelvectx, addedbefore)
1174 if not ispartialunshelve:
1169 if not ispartialunshelve:
1175 unshelvecleanup(ui, repo, basename, opts)
1170 unshelvecleanup(ui, repo, basename, opts)
1176 finally:
1171 finally:
1177 if tr:
1172 if tr:
1178 tr.release()
1173 tr.release()
1179 lockmod.release(lock)
1174 lockmod.release(lock)
General Comments 0
You need to be logged in to leave comments. Login now