overlayworkingctx: default branch to base context's branch...
Martin von Zweigbergk
r44502:2ecbc4ec default
@@ -1,2284 +1,2284 @@
# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to move sets of revisions to a different ancestor

This extension lets you rebase changesets in an existing Mercurial
repository.

For more information:
https://mercurial-scm.org/wiki/RebaseExtension
'''

from __future__ import absolute_import

import errno
import os

from mercurial.i18n import _
from mercurial.node import (
    nullrev,
    short,
)
from mercurial.pycompat import open
from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    copies,
    destutil,
    dirstateguard,
    error,
    extensions,
    hg,
    merge as mergemod,
    mergeutil,
    obsolete,
    obsutil,
    patch,
    phases,
    pycompat,
    registrar,
    repair,
    revset,
    revsetlang,
    rewriteutil,
    scmutil,
    smartset,
    state as statemod,
    util,
)

# The following constants are used throughout the rebase module. The ordering
# of their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
revtodostr = b'-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {b'-2', b'-3', b'-4', b'-5'}

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'


def _nothingtorebase():
    return 1


def _savegraft(ctx, extra):
    s = ctx.extra().get(b'source', None)
    if s is not None:
        extra[b'source'] = s
    s = ctx.extra().get(b'intermediate-source', None)
    if s is not None:
        extra[b'intermediate-source'] = s


def _savebranch(ctx, extra):
    extra[b'branch'] = ctx.branch()
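
# Note: the callbacks collected in rebaseruntime.extrafns are applied in
# order to the ``extra`` dict of each rebased commit (see _concludenode
# below). When --keepbranches is in effect, _performrebase inserts
# _savebranch at the front of the list, so a user-provided 'extrafn' that
# runs later can still override the recorded branch.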


def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(
        repo,
        action=b'rebase',
        sourceset=sourceset,
        onheadcheck=False,
        destspace=destspace,
    )


revsetpredicate = registrar.revsetpredicate()


@revsetpredicate(b'_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = None
    if x is not None:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])


@revsetpredicate(b'_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    # ``_destautoorphanrebase()``

    # automatic rebase destination for a single orphan revision.
    unfi = repo.unfiltered()
    obsoleted = unfi.revs(b'obsolete()')

    src = revset.getset(repo, subset, x).first()

    # Empty src or already obsoleted - Do not return a destination
    if not src or src in obsoleted:
        return smartset.baseset()
    dests = destutil.orphanpossibledestination(repo, src)
    if len(dests) > 1:
        raise error.Abort(
            _(b"ambiguous automatic rebase: %r could end up on any of %r")
            % (src, dests)
        )
    # We have zero or one destination, so we can just return here.
    return smartset.baseset(dests)


def _ctxdesc(ctx):
    """short description for a context"""
    desc = b'%d:%s "%s"' % (
        ctx.rev(),
        ctx,
        ctx.description().split(b'\n', 1)[0],
    )
    repo = ctx.repo()
    names = []
    for nsname, ns in pycompat.iteritems(repo.names):
        if nsname == b'branches':
            continue
        names.extend(ns.names(repo, ctx.node()))
    if names:
        desc += b' (%s)' % b' '.join(names)
    return desc


class rebaseruntime(object):
    """This class is a container for rebase runtime state"""

    def __init__(self, repo, ui, inmemory=False, opts=None):
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping from the old revision id to either the new rebased revision
        # or what needs to be done with the old revision. The state dict
        # contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get(b'collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get(b'date', None)

        e = opts.get(b'extrafn')  # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
        self.keepf = opts.get(b'keep', False)
        self.keepbranchesf = opts.get(b'keepbranches', False)
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        self.stateobj = statemod.cmdstate(repo, b'rebasestate')

    @property
    def repo(self):
        if self.prepared:
            return self._repo.unfiltered()
        else:
            return self._repo

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if tr:
            tr.addfilegenerator(
                b'rebasestate',
                (b'rebasestate',),
                self._writestatus,
                location=b'plain',
            )
        else:
            with self.repo.vfs(b"rebasestate", b"w") as f:
                self._writestatus(f)

    def _writestatus(self, f):
        repo = self.repo
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + b'\n')
        # was "dest". we now write dest per src root below.
        f.write(b'\n')
        f.write(repo[self.external].hex() + b'\n')
        f.write(b'%d\n' % int(self.collapsef))
        f.write(b'%d\n' % int(self.keepf))
        f.write(b'%d\n' % int(self.keepbranchesf))
        f.write(b'%s\n' % (self.activebookmark or b''))
        destmap = self.destmap
        for d, v in pycompat.iteritems(self.state):
            oldrev = repo[d].hex()
            if v >= 0:
                newrev = repo[v].hex()
            else:
                newrev = b"%d" % v
            destnode = repo[destmap[d]].hex()
            f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug(b'rebase status stored\n')
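
    # A sketch of the resulting plain-text .hg/rebasestate layout (inferred
    # from _writestatus above and _read below):
    #
    #   line 0: hex node of the original working-directory parent
    #   line 1: empty (legacy slot that used to hold the single destination)
    #   line 2: hex node of the external parent
    #   line 3: collapse flag (0 or 1)
    #   line 4: keep flag (0 or 1)
    #   line 5: keepbranches flag (0 or 1)
    #   line 6: active bookmark name, possibly empty (a recent addition)
    #   rest:   one "oldnode:newnode:destnode" entry per source revision,
    #           where newnode may be "-1" (revtodo) for not-yet-rebased revs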

    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))

        data = self._read()
        self.repo.ui.debug(b'rebase status resumed\n')

        self.originalwd = data[b'originalwd']
        self.destmap = data[b'destmap']
        self.state = data[b'state']
        self.skipped = data[b'skipped']
        self.collapsef = data[b'collapse']
        self.keepf = data[b'keep']
        self.keepbranchesf = data[b'keepbranches']
        self.external = data[b'external']
        self.activebookmark = data[b'activebookmark']

    def _read(self):
        self.prepared = True
        repo = self.repo
        assert repo.filtername is None
        data = {
            b'keepbranches': None,
            b'collapse': None,
            b'activebookmark': None,
            b'external': nullrev,
            b'keep': None,
            b'originalwd': None,
        }
        legacydest = None
        state = {}
        destmap = {}

        if True:
            f = repo.vfs(b"rebasestate")
            for i, l in enumerate(f.read().splitlines()):
                if i == 0:
                    data[b'originalwd'] = repo[l].rev()
                elif i == 1:
                    # this line should be empty in newer versions, but legacy
                    # clients may still write it
                    if l:
                        legacydest = repo[l].rev()
                elif i == 2:
                    data[b'external'] = repo[l].rev()
                elif i == 3:
                    data[b'collapse'] = bool(int(l))
                elif i == 4:
                    data[b'keep'] = bool(int(l))
                elif i == 5:
                    data[b'keepbranches'] = bool(int(l))
                elif i == 6 and not (len(l) == 81 and b':' in l):
                    # line 6 is a recent addition, so for backwards
                    # compatibility check that the line doesn't look like the
                    # oldrev:newrev lines
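                    # (an oldrev:newrev entry is two 40-character hex nodes
                    # joined by a colon, 40 + 1 + 40 = 81 bytes, hence the
                    # length check)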
                    data[b'activebookmark'] = l
                else:
                    args = l.split(b':')
                    oldrev = repo[args[0]].rev()
                    newrev = args[1]
                    if newrev in legacystates:
                        continue
                    if len(args) > 2:
                        destrev = repo[args[2]].rev()
                    else:
                        destrev = legacydest
                    destmap[oldrev] = destrev
                    if newrev == revtodostr:
                        state[oldrev] = revtodo
                        # Legacy compat special case
                    else:
                        state[oldrev] = repo[newrev].rev()

        if data[b'keepbranches'] is None:
            raise error.Abort(_(b'.hg/rebasestate is incomplete'))

        data[b'destmap'] = destmap
        data[b'state'] = state
        skipped = set()
        # recompute the set of skipped revs
        if not data[b'collapse']:
            seen = set(destmap.values())
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        data[b'skipped'] = skipped
        repo.ui.debug(
            b'computed skipped revs: %s\n'
            % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
        )

        return data

    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (
            self.obsoletenotrebased,
            self.obsoletewithoutsuccessorindestination,
            obsoleteextinctsuccessors,
        ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)

    def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(
                    _(
                        b'rebase aborted (no revision is removed,'
                        b' only broken state is cleared)\n'
                    )
                )
                return 0
            else:
                msg = _(b'cannot continue inconsistent rebase')
                hint = _(b'use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)

        if isabort:
            backup = backup and self.backupf
            return self._abort(backup=backup, suppwarns=suppwarns)

    def _preparenewrebase(self, destmap):
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        if not self.keepf:
            try:
                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
            except error.Abort as e:
                if e.hint is None:
                    e.hint = _(b'use --keep to keep original changesets')
                raise e

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_(b'nothing to rebase\n'))
            return _nothingtorebase()

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _(b'--collapse does not work with multiple destinations')
                )
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors(
                [destrev], inclusive=True
            )
            self.external = externalparent(self.repo, self.state, destancestors)
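            # A note on the collapse case: self.external is later passed as
            # the second parent of the collapsed commit (see the
            # _concludenode call in _finishrebase); it records the parent of
            # the rebase set that lies outside both the set and the
            # destination's ancestors.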

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_(b'reopening closed branch head %s\n') % dest)

        self.prepared = True

    def _assignworkingcopy(self):
        if self.inmemory:
            from mercurial.context import overlayworkingctx

            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug(b"rebasing in-memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug(b"rebasing on disk\n")
        self.repo.ui.log(
            b"rebase",
            b"using in-memory rebase: %r\n",
            self.inmemory,
            rebase_imm_used=self.inmemory,
        )

    def _performrebase(self, tr):
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(
                        _(b'cannot collapse multiple named branches')
                    )

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
        p = repo.ui.makeprogress(
            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
        )

        def progress(ctx):
            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))

        allowdivergence = self.ui.configbool(
            b'experimental', b'evolution.allowdivergence'
        )
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
            if not allowdivergence:
                sortedrevs -= self.repo.revs(
                    b'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
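                # Strict descendants of revisions that are obsolete with no
                # successor in the destination are dropped here, since
                # rebasing them would create content-divergent changesets.
                # The obsolete revisions themselves stay in sortedrevs so
                # _rebasenode can report each one with a "would cause
                # divergence" note.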
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))

    def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision.'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        extra = {b'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
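        # The rebased commit should be at least draft; max() also lets a
        # secret original stay secret (phase order: public < draft < secret).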
522 destphase = max(ctx.phase(), phases.draft)
522 destphase = max(ctx.phase(), phases.draft)
523 overrides = {(b'phases', b'new-commit'): destphase}
523 overrides = {(b'phases', b'new-commit'): destphase}
524 if keepbranch:
524 if keepbranch:
525 overrides[(b'ui', b'allowemptycommit')] = True
525 overrides[(b'ui', b'allowemptycommit')] = True
526 with repo.ui.configoverride(overrides, b'rebase'):
526 with repo.ui.configoverride(overrides, b'rebase'):
527 if self.inmemory:
527 if self.inmemory:
528 newnode = commitmemorynode(
528 newnode = commitmemorynode(
529 repo,
529 repo,
530 p1,
530 p1,
531 p2,
531 p2,
532 wctx=self.wctx,
532 wctx=self.wctx,
533 extra=extra,
533 extra=extra,
534 commitmsg=commitmsg,
534 commitmsg=commitmsg,
535 editor=editor,
535 editor=editor,
536 user=ctx.user(),
536 user=ctx.user(),
537 date=date,
537 date=date,
538 )
538 )
539 mergemod.mergestate.clean(repo)
539 mergemod.mergestate.clean(repo)
540 else:
540 else:
541 newnode = commitnode(
541 newnode = commitnode(
542 repo,
542 repo,
543 p1,
543 p1,
544 p2,
544 p2,
545 extra=extra,
545 extra=extra,
546 commitmsg=commitmsg,
546 commitmsg=commitmsg,
547 editor=editor,
547 editor=editor,
548 user=ctx.user(),
548 user=ctx.user(),
549 date=date,
549 date=date,
550 )
550 )
551
551
552 if newnode is None:
552 if newnode is None:
553 # If it ended up being a no-op commit, then the normal
553 # If it ended up being a no-op commit, then the normal
554 # merge state clean-up path doesn't happen, so do it
554 # merge state clean-up path doesn't happen, so do it
555 # here. Fix issue5494
555 # here. Fix issue5494
556 mergemod.mergestate.clean(repo)
556 mergemod.mergestate.clean(repo)
557 return newnode
557 return newnode
558
558
559 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
559 def _rebasenode(self, tr, rev, allowdivergence, progressfn):
560 repo, ui, opts = self.repo, self.ui, self.opts
560 repo, ui, opts = self.repo, self.ui, self.opts
561 dest = self.destmap[rev]
561 dest = self.destmap[rev]
562 ctx = repo[rev]
562 ctx = repo[rev]
563 desc = _ctxdesc(ctx)
563 desc = _ctxdesc(ctx)
564 if self.state[rev] == rev:
564 if self.state[rev] == rev:
565 ui.status(_(b'already rebased %s\n') % desc)
565 ui.status(_(b'already rebased %s\n') % desc)
566 elif (
566 elif (
567 not allowdivergence
567 not allowdivergence
568 and rev in self.obsoletewithoutsuccessorindestination
568 and rev in self.obsoletewithoutsuccessorindestination
569 ):
569 ):
570 msg = (
570 msg = (
571 _(
571 _(
572 b'note: not rebasing %s and its descendants as '
572 b'note: not rebasing %s and its descendants as '
573 b'this would cause divergence\n'
573 b'this would cause divergence\n'
574 )
574 )
575 % desc
575 % desc
576 )
576 )
577 repo.ui.status(msg)
577 repo.ui.status(msg)
578 self.skipped.add(rev)
578 self.skipped.add(rev)
579 elif rev in self.obsoletenotrebased:
579 elif rev in self.obsoletenotrebased:
580 succ = self.obsoletenotrebased[rev]
580 succ = self.obsoletenotrebased[rev]
581 if succ is None:
581 if succ is None:
582 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
582 msg = _(b'note: not rebasing %s, it has no successor\n') % desc
583 else:
583 else:
584 succdesc = _ctxdesc(repo[succ])
584 succdesc = _ctxdesc(repo[succ])
585 msg = _(
585 msg = _(
586 b'note: not rebasing %s, already in destination as %s\n'
586 b'note: not rebasing %s, already in destination as %s\n'
587 ) % (desc, succdesc)
587 ) % (desc, succdesc)
588 repo.ui.status(msg)
588 repo.ui.status(msg)
589 # Make clearrebased aware state[rev] is not a true successor
589 # Make clearrebased aware state[rev] is not a true successor
590 self.skipped.add(rev)
590 self.skipped.add(rev)
591 # Record rev as moved to its desired destination in self.state.
591 # Record rev as moved to its desired destination in self.state.
592 # This helps bookmark and working parent movement.
592 # This helps bookmark and working parent movement.
593 dest = max(
593 dest = max(
594 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
594 adjustdest(repo, rev, self.destmap, self.state, self.skipped)
595 )
595 )
596 self.state[rev] = dest
596 self.state[rev] = dest
597 elif self.state[rev] == revtodo:
597 elif self.state[rev] == revtodo:
598 ui.status(_(b'rebasing %s\n') % desc)
598 ui.status(_(b'rebasing %s\n') % desc)
599 progressfn(ctx)
599 progressfn(ctx)
600 p1, p2, base = defineparents(
600 p1, p2, base = defineparents(
601 repo,
601 repo,
602 rev,
602 rev,
603 self.destmap,
603 self.destmap,
604 self.state,
604 self.state,
605 self.skipped,
605 self.skipped,
606 self.obsoletenotrebased,
606 self.obsoletenotrebased,
607 )
607 )
608 if not self.inmemory and len(repo[None].parents()) == 2:
608 if not self.inmemory and len(repo[None].parents()) == 2:
609 repo.ui.debug(b'resuming interrupted rebase\n')
609 repo.ui.debug(b'resuming interrupted rebase\n')
610 else:
610 else:
611 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
611 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
612 with ui.configoverride(overrides, b'rebase'):
612 with ui.configoverride(overrides, b'rebase'):
613 stats = rebasenode(
613 stats = rebasenode(
614 repo,
614 repo,
615 rev,
615 rev,
616 p1,
616 p1,
617 base,
617 base,
618 self.collapsef,
618 self.collapsef,
619 dest,
619 dest,
620 wctx=self.wctx,
620 wctx=self.wctx,
621 )
621 )
622 if stats.unresolvedcount > 0:
622 if stats.unresolvedcount > 0:
623 if self.inmemory:
623 if self.inmemory:
624 raise error.InMemoryMergeConflictsError()
624 raise error.InMemoryMergeConflictsError()
625 else:
625 else:
626 raise error.InterventionRequired(
626 raise error.InterventionRequired(
627 _(
627 _(
628 b'unresolved conflicts (see hg '
628 b'unresolved conflicts (see hg '
629 b'resolve, then hg rebase --continue)'
629 b'resolve, then hg rebase --continue)'
630 )
630 )
631 )
631 )
632 if not self.collapsef:
632 if not self.collapsef:
633 merging = p2 != nullrev
633 merging = p2 != nullrev
634 editform = cmdutil.mergeeditform(merging, b'rebase')
634 editform = cmdutil.mergeeditform(merging, b'rebase')
635 editor = cmdutil.getcommiteditor(
635 editor = cmdutil.getcommiteditor(
636 editform=editform, **pycompat.strkwargs(opts)
636 editform=editform, **pycompat.strkwargs(opts)
637 )
637 )
638 newnode = self._concludenode(rev, p1, p2, editor)
638 newnode = self._concludenode(rev, p1, p2, editor)
639 else:
639 else:
640 # Skip commit if we are collapsing
640 # Skip commit if we are collapsing
641 if self.inmemory:
641 if self.inmemory:
642 self.wctx.setbase(repo[p1])
642 self.wctx.setbase(repo[p1])
643 else:
643 else:
644 repo.setparents(repo[p1].node())
644 repo.setparents(repo[p1].node())
645 newnode = None
645 newnode = None
646 # Update the state
646 # Update the state
647 if newnode is not None:
647 if newnode is not None:
648 self.state[rev] = repo[newnode].rev()
648 self.state[rev] = repo[newnode].rev()
649 ui.debug(b'rebased as %s\n' % short(newnode))
649 ui.debug(b'rebased as %s\n' % short(newnode))
650 else:
650 else:
651 if not self.collapsef:
651 if not self.collapsef:
652 ui.warn(
652 ui.warn(
653 _(
653 _(
654 b'note: not rebasing %s, its destination already '
654 b'note: not rebasing %s, its destination already '
655 b'has all its changes\n'
655 b'has all its changes\n'
656 )
656 )
657 % desc
657 % desc
658 )
658 )
659 self.skipped.add(rev)
659 self.skipped.add(rev)
660 self.state[rev] = p1
660 self.state[rev] = p1
661 ui.debug(b'next revision set to %d\n' % p1)
661 ui.debug(b'next revision set to %d\n' % p1)
662 else:
662 else:
663 ui.status(
663 ui.status(
664 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
664 _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
665 )
665 )
666 if not tr:
666 if not tr:
667 # When not using single transaction, store state after each
667 # When not using single transaction, store state after each
668 # commit is completely done. On InterventionRequired, we thus
668 # commit is completely done. On InterventionRequired, we thus
669 # won't store the status. Instead, we'll hit the "len(parents) == 2"
669 # won't store the status. Instead, we'll hit the "len(parents) == 2"
670 # case and realize that the commit was in progress.
670 # case and realize that the commit was in progress.
671 self.storestatus()
671 self.storestatus()
672
672
673 def _finishrebase(self):
673 def _finishrebase(self):
674 repo, ui, opts = self.repo, self.ui, self.opts
674 repo, ui, opts = self.repo, self.ui, self.opts
675 fm = ui.formatter(b'rebase', opts)
675 fm = ui.formatter(b'rebase', opts)
676 fm.startitem()
676 fm.startitem()
677 if self.collapsef:
677 if self.collapsef:
678 p1, p2, _base = defineparents(
678 p1, p2, _base = defineparents(
679 repo,
679 repo,
680 min(self.state),
680 min(self.state),
681 self.destmap,
681 self.destmap,
682 self.state,
682 self.state,
683 self.skipped,
683 self.skipped,
684 self.obsoletenotrebased,
684 self.obsoletenotrebased,
685 )
685 )
686 editopt = opts.get(b'edit')
686 editopt = opts.get(b'edit')
687 editform = b'rebase.collapse'
687 editform = b'rebase.collapse'
688 if self.collapsemsg:
688 if self.collapsemsg:
689 commitmsg = self.collapsemsg
689 commitmsg = self.collapsemsg
690 else:
690 else:
691 commitmsg = b'Collapsed revision'
691 commitmsg = b'Collapsed revision'
692 for rebased in sorted(self.state):
692 for rebased in sorted(self.state):
693 if rebased not in self.skipped:
693 if rebased not in self.skipped:
694 commitmsg += b'\n* %s' % repo[rebased].description()
694 commitmsg += b'\n* %s' % repo[rebased].description()
695 editopt = True
695 editopt = True
696 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
696 editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
697 revtoreuse = max(self.state)
697 revtoreuse = max(self.state)
698
698
699 newnode = self._concludenode(
699 newnode = self._concludenode(
700 revtoreuse, p1, self.external, editor, commitmsg=commitmsg
700 revtoreuse, p1, self.external, editor, commitmsg=commitmsg
701 )
701 )
702
702
703 if newnode is not None:
703 if newnode is not None:
704 newrev = repo[newnode].rev()
704 newrev = repo[newnode].rev()
705 for oldrev in self.state:
705 for oldrev in self.state:
706 self.state[oldrev] = newrev
706 self.state[oldrev] = newrev
707
707
708 if b'qtip' in repo.tags():
708 if b'qtip' in repo.tags():
709 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
709 updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))
710
710
711 # restore original working directory
711 # restore original working directory
712 # (we do this before stripping)
712 # (we do this before stripping)
713 newwd = self.state.get(self.originalwd, self.originalwd)
713 newwd = self.state.get(self.originalwd, self.originalwd)
714 if newwd < 0:
714 if newwd < 0:
715 # original directory is a parent of rebase set root or ignored
715 # original directory is a parent of rebase set root or ignored
716 newwd = self.originalwd
716 newwd = self.originalwd
717 if newwd not in [c.rev() for c in repo[None].parents()]:
717 if newwd not in [c.rev() for c in repo[None].parents()]:
718 ui.note(_(b"update back to initial working directory parent\n"))
718 ui.note(_(b"update back to initial working directory parent\n"))
719 hg.updaterepo(repo, newwd, overwrite=False)
719 hg.updaterepo(repo, newwd, overwrite=False)
720
720
721 collapsedas = None
721 collapsedas = None
722 if self.collapsef and not self.keepf:
722 if self.collapsef and not self.keepf:
723 collapsedas = newnode
723 collapsedas = newnode
724 clearrebased(
724 clearrebased(
725 ui,
725 ui,
726 repo,
726 repo,
727 self.destmap,
727 self.destmap,
728 self.state,
728 self.state,
729 self.skipped,
729 self.skipped,
730 collapsedas,
730 collapsedas,
731 self.keepf,
731 self.keepf,
732 fm=fm,
732 fm=fm,
733 backup=self.backupf,
733 backup=self.backupf,
734 )
734 )
735
735
736 clearstatus(repo)
736 clearstatus(repo)
737 clearcollapsemsg(repo)
737 clearcollapsemsg(repo)
738
738
739 ui.note(_(b"rebase completed\n"))
739 ui.note(_(b"rebase completed\n"))
740 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
740 util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
741 if self.skipped:
741 if self.skipped:
742 skippedlen = len(self.skipped)
742 skippedlen = len(self.skipped)
743 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
743 ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
744 fm.end()
744 fm.end()
745
745
746 if (
746 if (
747 self.activebookmark
747 self.activebookmark
748 and self.activebookmark in repo._bookmarks
748 and self.activebookmark in repo._bookmarks
749 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
749 and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
750 ):
750 ):
751 bookmarks.activate(repo, self.activebookmark)
751 bookmarks.activate(repo, self.activebookmark)
752
752
753 def _abort(self, backup=True, suppwarns=False):
753 def _abort(self, backup=True, suppwarns=False):
754 '''Restore the repository to its original state.'''
754 '''Restore the repository to its original state.'''
755
755
756 repo = self.repo
756 repo = self.repo
757 try:
757 try:
758 # If the first commits in the rebased set get skipped during the
758 # If the first commits in the rebased set get skipped during the
759 # rebase, their values within the state mapping will be the dest
759 # rebase, their values within the state mapping will be the dest
760 # rev id. The rebased list must must not contain the dest rev
760 # rev id. The rebased list must must not contain the dest rev
761 # (issue4896)
761 # (issue4896)
762 rebased = [
762 rebased = [
763 s
763 s
764 for r, s in self.state.items()
764 for r, s in self.state.items()
765 if s >= 0 and s != r and s != self.destmap[r]
765 if s >= 0 and s != r and s != self.destmap[r]
766 ]
766 ]
767 immutable = [d for d in rebased if not repo[d].mutable()]
767 immutable = [d for d in rebased if not repo[d].mutable()]
768 cleanup = True
768 cleanup = True
769 if immutable:
769 if immutable:
770 repo.ui.warn(
770 repo.ui.warn(
771 _(b"warning: can't clean up public changesets %s\n")
771 _(b"warning: can't clean up public changesets %s\n")
772 % b', '.join(bytes(repo[r]) for r in immutable),
772 % b', '.join(bytes(repo[r]) for r in immutable),
773 hint=_(b"see 'hg help phases' for details"),
773 hint=_(b"see 'hg help phases' for details"),
774 )
774 )
775 cleanup = False
775 cleanup = False
776
776
777 descendants = set()
777 descendants = set()
778 if rebased:
778 if rebased:
779 descendants = set(repo.changelog.descendants(rebased))
779 descendants = set(repo.changelog.descendants(rebased))
780 if descendants - set(rebased):
780 if descendants - set(rebased):
781 repo.ui.warn(
781 repo.ui.warn(
782 _(
782 _(
783 b"warning: new changesets detected on "
783 b"warning: new changesets detected on "
784 b"destination branch, can't strip\n"
784 b"destination branch, can't strip\n"
785 )
785 )
786 )
786 )
787 cleanup = False
787 cleanup = False
788
788
789 if cleanup:
789 if cleanup:
790 if rebased:
790 if rebased:
791 strippoints = [
791 strippoints = [
792 c.node() for c in repo.set(b'roots(%ld)', rebased)
792 c.node() for c in repo.set(b'roots(%ld)', rebased)
793 ]
793 ]
794
794
795 updateifonnodes = set(rebased)
795 updateifonnodes = set(rebased)
796 updateifonnodes.update(self.destmap.values())
796 updateifonnodes.update(self.destmap.values())
797 updateifonnodes.add(self.originalwd)
797 updateifonnodes.add(self.originalwd)
798 shouldupdate = repo[b'.'].rev() in updateifonnodes
798 shouldupdate = repo[b'.'].rev() in updateifonnodes
799
799
800 # Update away from the rebase if necessary
800 # Update away from the rebase if necessary
801 if shouldupdate or needupdate(repo, self.state):
801 if shouldupdate or needupdate(repo, self.state):
802 mergemod.update(
802 mergemod.update(
803 repo, self.originalwd, branchmerge=False, force=True
803 repo, self.originalwd, branchmerge=False, force=True
804 )
804 )
805
805
806 # Strip from the first rebased revision
806 # Strip from the first rebased revision
807 if rebased:
807 if rebased:
808 repair.strip(repo.ui, repo, strippoints, backup=backup)
808 repair.strip(repo.ui, repo, strippoints, backup=backup)
809
809
810 if self.activebookmark and self.activebookmark in repo._bookmarks:
810 if self.activebookmark and self.activebookmark in repo._bookmarks:
811 bookmarks.activate(repo, self.activebookmark)
811 bookmarks.activate(repo, self.activebookmark)
812
812
813 finally:
813 finally:
814 clearstatus(repo)
814 clearstatus(repo)
815 clearcollapsemsg(repo)
815 clearcollapsemsg(repo)
816 if not suppwarns:
816 if not suppwarns:
817 repo.ui.warn(_(b'rebase aborted\n'))
817 repo.ui.warn(_(b'rebase aborted\n'))
818 return 0
818 return 0
819
819
820
820
821 @command(
821 @command(
822 b'rebase',
822 b'rebase',
823 [
823 [
824 (
824 (
825 b's',
825 b's',
826 b'source',
826 b'source',
827 b'',
827 b'',
828 _(b'rebase the specified changeset and descendants'),
828 _(b'rebase the specified changeset and descendants'),
829 _(b'REV'),
829 _(b'REV'),
830 ),
830 ),
831 (
831 (
832 b'b',
832 b'b',
833 b'base',
833 b'base',
834 b'',
834 b'',
835 _(b'rebase everything from branching point of specified changeset'),
835 _(b'rebase everything from branching point of specified changeset'),
836 _(b'REV'),
836 _(b'REV'),
837 ),
837 ),
838 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
838 (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
839 (
839 (
840 b'd',
840 b'd',
841 b'dest',
841 b'dest',
842 b'',
842 b'',
843 _(b'rebase onto the specified changeset'),
843 _(b'rebase onto the specified changeset'),
844 _(b'REV'),
844 _(b'REV'),
845 ),
845 ),
846 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
846 (b'', b'collapse', False, _(b'collapse the rebased changesets')),
847 (
847 (
848 b'm',
848 b'm',
849 b'message',
849 b'message',
850 b'',
850 b'',
851 _(b'use text as collapse commit message'),
851 _(b'use text as collapse commit message'),
852 _(b'TEXT'),
852 _(b'TEXT'),
853 ),
853 ),
854 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
854 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
855 (
855 (
856 b'l',
856 b'l',
857 b'logfile',
857 b'logfile',
858 b'',
858 b'',
859 _(b'read collapse commit message from file'),
859 _(b'read collapse commit message from file'),
860 _(b'FILE'),
860 _(b'FILE'),
861 ),
861 ),
862 (b'k', b'keep', False, _(b'keep original changesets')),
862 (b'k', b'keep', False, _(b'keep original changesets')),
863 (b'', b'keepbranches', False, _(b'keep original branch names')),
863 (b'', b'keepbranches', False, _(b'keep original branch names')),
864 (b'D', b'detach', False, _(b'(DEPRECATED)')),
864 (b'D', b'detach', False, _(b'(DEPRECATED)')),
865 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
865 (b'i', b'interactive', False, _(b'(DEPRECATED)')),
866 (b't', b'tool', b'', _(b'specify merge tool')),
866 (b't', b'tool', b'', _(b'specify merge tool')),
867 (b'', b'stop', False, _(b'stop interrupted rebase')),
867 (b'', b'stop', False, _(b'stop interrupted rebase')),
868 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
868 (b'c', b'continue', False, _(b'continue an interrupted rebase')),
869 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
869 (b'a', b'abort', False, _(b'abort an interrupted rebase')),
870 (
870 (
871 b'',
871 b'',
872 b'auto-orphans',
872 b'auto-orphans',
873 b'',
873 b'',
874 _(
874 _(
875 b'automatically rebase orphan revisions '
875 b'automatically rebase orphan revisions '
876 b'in the specified revset (EXPERIMENTAL)'
876 b'in the specified revset (EXPERIMENTAL)'
877 ),
877 ),
878 ),
878 ),
879 ]
879 ]
880 + cmdutil.dryrunopts
880 + cmdutil.dryrunopts
881 + cmdutil.formatteropts
881 + cmdutil.formatteropts
882 + cmdutil.confirmopts,
882 + cmdutil.confirmopts,
883 _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
883 _(b'[-s REV | -b REV] [-d REV] [OPTION]'),
884 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
884 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
885 )
885 )
886 def rebase(ui, repo, **opts):
886 def rebase(ui, repo, **opts):
887 """move changeset (and descendants) to a different branch
887 """move changeset (and descendants) to a different branch
888
888
889 Rebase uses repeated merging to graft changesets from one part of
889 Rebase uses repeated merging to graft changesets from one part of
890 history (the source) onto another (the destination). This can be
890 history (the source) onto another (the destination). This can be
891 useful for linearizing *local* changes relative to a master
891 useful for linearizing *local* changes relative to a master
892 development tree.
892 development tree.
893
893
894 Published commits cannot be rebased (see :hg:`help phases`).
894 Published commits cannot be rebased (see :hg:`help phases`).
895 To copy commits, see :hg:`help graft`.
895 To copy commits, see :hg:`help graft`.
896
896
897 If you don't specify a destination changeset (``-d/--dest``), rebase
897 If you don't specify a destination changeset (``-d/--dest``), rebase
898 will use the same logic as :hg:`merge` to pick a destination. if
898 will use the same logic as :hg:`merge` to pick a destination. if
899 the current branch contains exactly one other head, the other head
899 the current branch contains exactly one other head, the other head
900 is merged with by default. Otherwise, an explicit revision with
900 is merged with by default. Otherwise, an explicit revision with
901 which to merge with must be provided. (destination changeset is not
901 which to merge with must be provided. (destination changeset is not
902 modified by rebasing, but new changesets are added as its
902 modified by rebasing, but new changesets are added as its
903 descendants.)
903 descendants.)
904
904
905 Here are the ways to select changesets:
905 Here are the ways to select changesets:
906
906
907 1. Explicitly select them using ``--rev``.
907 1. Explicitly select them using ``--rev``.
908
908
909 2. Use ``--source`` to select a root changeset and include all of its
909 2. Use ``--source`` to select a root changeset and include all of its
910 descendants.
910 descendants.
911
911
912 3. Use ``--base`` to select a changeset; rebase will find ancestors
912 3. Use ``--base`` to select a changeset; rebase will find ancestors
913 and their descendants which are not also ancestors of the destination.
913 and their descendants which are not also ancestors of the destination.
914
914
915 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
915 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
916 rebase will use ``--base .`` as above.
916 rebase will use ``--base .`` as above.
917
917
918 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
918 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
919 can be used in ``--dest``. Destination would be calculated per source
919 can be used in ``--dest``. Destination would be calculated per source
920 revision with ``SRC`` substituted by that single source revision and
920 revision with ``SRC`` substituted by that single source revision and
921 ``ALLSRC`` substituted by all source revisions.
921 ``ALLSRC`` substituted by all source revisions.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``, see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted; there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c, aborted with --abort/-a, or stopped with
    --stop.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      - stabilize orphaned changesets so history looks linear::

          hg rebase -r 'orphan()-obsolete()'\
          -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
          max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'

      Configuration Options:

      You can make rebase require a destination if you set the following config
      option::

        [commands]
        rebase.requiredest = True

      By default, rebase will close the transaction after each commit. For
      performance purposes, you can configure rebase to use a single transaction
      across the entire rebase. WARNING: This setting introduces a significant
      risk of losing the work you've done in a rebase if the rebase aborts
      unexpectedly::

        [rebase]
        singletransaction = True

      By default, rebase writes to the working copy, but you can configure it to
      run in-memory for better performance. When the rebase is not moving the
      parent(s) of the working copy (AKA the "currently checked out changesets"),
      this may also allow it to run even if the working copy is dirty::

        [rebase]
        experimental.inmemory = True

      Return Values:

      Returns 0 on success, 1 if nothing to rebase or there are
      unresolved conflicts.

    """
    opts = pycompat.byteskwargs(opts)
    inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
    action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
    if action:
        cmdutil.check_incompatible_arguments(
            opts, action, b'confirm', b'dry_run'
        )
        cmdutil.check_incompatible_arguments(
            opts, action, b'rev', b'source', b'base', b'dest'
        )
    cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
    cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')

    if action or repo.currenttransaction() is not None:
        # in-memory rebase is not compatible with resuming rebases.
        # (Or if it is run within a transaction, since the restart logic can
        # fail the entire transaction.)
        inmemory = False

    if opts.get(b'auto_orphans'):
        disallowed_opts = set(opts) - {b'auto_orphans'}
        cmdutil.check_incompatible_arguments(
            opts, b'auto_orphans', *disallowed_opts
        )

        userrevs = list(repo.revs(opts.get(b'auto_orphans')))
        opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
        opts[b'dest'] = b'_destautoorphanrebase(SRC)'
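        # `_destautoorphanrebase(SRC)` is an internal revset predicate that
        # picks a destination for each orphan source revision individually,
        # via the per-SRC substitution performed in _definedestmap() below.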

    if opts.get(b'dry_run') or opts.get(b'confirm'):
        return _dryrunrebase(ui, repo, action, opts)
    elif action == b'stop':
        rbsrt = rebaseruntime(repo, ui)
        with repo.wlock(), repo.lock():
            rbsrt.restorestatus()
            if rbsrt.collapsef:
                raise error.Abort(_(b"cannot stop in --collapse session"))
            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if not (rbsrt.keepf or allowunstable):
                raise error.Abort(
                    _(
                        b"cannot remove original changesets with"
                        b" unrebased descendants"
                    ),
                    hint=_(
                        b'either enable obsmarkers to allow unstable '
                        b'revisions or use --keep to keep original '
                        b'changesets'
                    ),
                )
            if needupdate(repo, rbsrt.state):
                # update to the current working revision
                # to clear interrupted merge
                hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
            rbsrt._finishrebase()
            return 0
    elif inmemory:
        try:
            # in-memory merge doesn't support conflicts, so if we hit any, abort
            # and re-run as an on-disk merge.
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
        except error.InMemoryMergeConflictsError:
            ui.warn(
                _(
                    b'hit merge conflicts; re-running rebase without in-memory'
                    b' merge\n'
                )
            )
            # TODO: Make in-memory merge not use the on-disk merge state, so
            # we don't have to clean it here
            mergemod.mergestate.clean(repo)
            clearstatus(repo)
            clearcollapsemsg(repo)
            return _dorebase(ui, repo, action, opts, inmemory=False)
    else:
        return _dorebase(ui, repo, action, opts)


def _dryrunrebase(ui, repo, action, opts):
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True, backup=False, suppwarns=True
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True, backup=False, suppwarns=True
                )


def _dorebase(ui, repo, action, opts, inmemory=False):
    rbsrt = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)


def _origrebase(
    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
):
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        if opts.get(b'interactive'):
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', None),
                opts.get(b'base', None),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
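            # util.acceptintervention(tr) closes (rather than aborts) the
            # transaction if an InterventionRequired exception, such as an
            # unresolved merge conflict, propagates out, so the nodes rebased
            # so far are kept.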
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()


def _definedestmap(
    ui,
    repo,
    inmemory,
    destf=None,
    srcf=None,
    basef=None,
    revf=None,
    destspace=None,
):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        rebaseset = repo.revs(b'(%ld)::', src)
        assert rebaseset
    else:
        base = scmutil.revrange(repo, [basef or b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}
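        # For example, `hg rebase -r 5+6 -d 9` ends up here with
        # destmap == {5: 9, 6: 9}.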

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap


def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if p.rev() not in state and p.rev() not in destancestors:
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
    )


def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # Replicates the empty check in ``repo.commit``.
    if wctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
        return None

    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = None
    if b'branch' in extra:
        branch = extra[b'branch']
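    # With branch=None, tomemctx() falls back to the base context's branch
    # (the new default this revision introduces), instead of computing it
    # from p1 here.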

    memctx = wctx.tomemctx(
        commitmsg,
        parents=(p1, p2),
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres


def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool(b'rebase', b'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        repo.setparents(repo[p1].node(), repo[p2].node())

        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

    repo.dirstate.setbranch(repo[newnode].branch())
    return newnode


def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor"""
    # Merge phase
    # Update to destination and merge it with local
    if wctx.isinmemory():
        wctx.setbase(repo[p1])
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, repo[p1]))
            mergemod.update(repo, p1, branchmerge=False, force=True)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(b" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=collapse,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    if collapse:
        copies.duplicatecopies(repo, wctx, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
    return stats


def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when rebasing B+E to F and C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example involving a merge changeset: with rebase -r C+G+H -d K,
    rebase will first move C to C1 and G to G1, and when it is checking H,
    the adjusted destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

          C'   After rebasing C, when considering B's destination, use C'
          |    instead of the original C.
      B   D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]
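    # In the state dict, d > 0 means s has already been rebased and d is its
    # new revision number (revtodo, i.e. -1, marks revisions not yet rebased).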

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result


def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is a no-op because of
    markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # Obsolete node with successors not in dest leads to divergence
    divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
    divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
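    # Illustration: if X in the source was amended into X', and X' is not an
    # ancestor of the destination, rebasing X would create a second successor
    # of X alongside X', i.e. content-divergence.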
1602
1602
1603 if divergencebasecandidates and not divergenceok:
1603 if divergencebasecandidates and not divergenceok:
1604 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1604 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1605 msg = _(b"this rebase will cause divergences from: %s")
1605 msg = _(b"this rebase will cause divergences from: %s")
1606 h = _(
1606 h = _(
1607 b"to force the rebase please set "
1607 b"to force the rebase please set "
1608 b"experimental.evolution.allowdivergence=True"
1608 b"experimental.evolution.allowdivergence=True"
1609 )
1609 )
1610 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1610 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1611
1611
1612
1612
1613 def successorrevs(unfi, rev):
1613 def successorrevs(unfi, rev):
1614 """yield revision numbers for successors of rev"""
1614 """yield revision numbers for successors of rev"""
1615 assert unfi.filtername is None
1615 assert unfi.filtername is None
1616 get_rev = unfi.changelog.index.get_rev
1616 get_rev = unfi.changelog.index.get_rev
1617 for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
1617 for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
1618 r = get_rev(s)
1618 r = get_rev(s)
1619 if r is not None:
1619 if r is not None:
1620 yield r
1620 yield r
1621
1621
1622
1622
1623 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1623 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1624 """Return new parents and optionally a merge base for rev being rebased
1624 """Return new parents and optionally a merge base for rev being rebased
1625
1625
1626 The destination specified by "dest" cannot always be used directly because
1626 The destination specified by "dest" cannot always be used directly because
1627 previously rebase result could affect destination. For example,
1627 previously rebase result could affect destination. For example,
1628
1628
1629 D E rebase -r C+D+E -d B
1629 D E rebase -r C+D+E -d B
1630 |/ C will be rebased to C'
1630 |/ C will be rebased to C'
1631 B C D's new destination will be C' instead of B
1631 B C D's new destination will be C' instead of B
1632 |/ E's new destination will be C' instead of B
1632 |/ E's new destination will be C' instead of B
1633 A
1633 A
1634
1634
1635 The new parents of a merge is slightly more complicated. See the comment
1635 The new parents of a merge is slightly more complicated. See the comment
1636 block below.
1636 block below.
1637 """
1637 """
1638 # use unfiltered changelog since successorrevs may return filtered nodes
1638 # use unfiltered changelog since successorrevs may return filtered nodes
1639 assert repo.filtername is None
1639 assert repo.filtername is None
1640 cl = repo.changelog
1640 cl = repo.changelog
1641 isancestor = cl.isancestorrev
1641 isancestor = cl.isancestorrev
1642
1642
1643 dest = destmap[rev]
1643 dest = destmap[rev]
1644 oldps = repo.changelog.parentrevs(rev) # old parents
1644 oldps = repo.changelog.parentrevs(rev) # old parents
1645 newps = [nullrev, nullrev] # new parents
1645 newps = [nullrev, nullrev] # new parents
1646 dests = adjustdest(repo, rev, destmap, state, skipped)
1646 dests = adjustdest(repo, rev, destmap, state, skipped)
1647 bases = list(oldps) # merge base candidates, initially just old parents
1647 bases = list(oldps) # merge base candidates, initially just old parents
1648
1648
1649 if all(r == nullrev for r in oldps[1:]):
1649 if all(r == nullrev for r in oldps[1:]):
1650 # For non-merge changeset, just move p to adjusted dest as requested.
1650 # For non-merge changeset, just move p to adjusted dest as requested.
1651 newps[0] = dests[0]
1651 newps[0] = dests[0]
1652 else:
1652 else:
1653 # For merge changeset, if we move p to dests[i] unconditionally, both
1653 # For merge changeset, if we move p to dests[i] unconditionally, both
1654 # parents may change and the end result looks like "the merge loses a
1654 # parents may change and the end result looks like "the merge loses a
1655 # parent", which is a surprise. This is a limit because "--dest" only
1655 # parent", which is a surprise. This is a limit because "--dest" only
1656 # accepts one dest per src.
1656 # accepts one dest per src.
1657 #
1657 #
1658 # Therefore, only move p with reasonable conditions (in this order):
1658 # Therefore, only move p with reasonable conditions (in this order):
1659 # 1. use dest, if dest is a descendent of (p or one of p's successors)
1659 # 1. use dest, if dest is a descendent of (p or one of p's successors)
1660 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1660 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1661 #
1661 #
1662 # Comparing with adjustdest, the logic here does some additional work:
1662 # Comparing with adjustdest, the logic here does some additional work:
1663 # 1. decide which parents will not be moved towards dest
1663 # 1. decide which parents will not be moved towards dest
1664 # 2. if the above decision is "no", should a parent still be moved
1664 # 2. if the above decision is "no", should a parent still be moved
1665 # because it was rebased?
1665 # because it was rebased?
1666 #
1666 #
1667 # For example:
1667 # For example:
1668 #
1668 #
1669 # C # "rebase -r C -d D" is an error since none of the parents
1669 # C # "rebase -r C -d D" is an error since none of the parents
1670 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1670 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1671 # A B D # B (using rule "2."), since B will be rebased.
1671 # A B D # B (using rule "2."), since B will be rebased.
1672 #
1672 #
1673 # The loop tries to be not rely on the fact that a Mercurial node has
1673 # The loop tries to be not rely on the fact that a Mercurial node has
1674 # at most 2 parents.
1674 # at most 2 parents.
1675 for i, p in enumerate(oldps):
1675 for i, p in enumerate(oldps):
1676 np = p # new parent
1676 np = p # new parent
1677 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1677 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1678 np = dests[i]
1678 np = dests[i]
1679 elif p in state and state[p] > 0:
1679 elif p in state and state[p] > 0:
1680 np = state[p]
1680 np = state[p]
1681
1681
1682 # "bases" only record "special" merge bases that cannot be
1682 # "bases" only record "special" merge bases that cannot be
1683 # calculated from changelog DAG (i.e. isancestor(p, np) is False).
1683 # calculated from changelog DAG (i.e. isancestor(p, np) is False).
1684 # For example:
1684 # For example:
1685 #
1685 #
1686 # B' # rebase -s B -d D, when B was rebased to B'. dest for C
1686 # B' # rebase -s B -d D, when B was rebased to B'. dest for C
1687 # | C # is B', but merge base for C is B, instead of
1687 # | C # is B', but merge base for C is B, instead of
1688 # D | # changelog.ancestor(C, B') == A. If changelog DAG and
1688 # D | # changelog.ancestor(C, B') == A. If changelog DAG and
1689 # | B # "state" edges are merged (so there will be an edge from
1689 # | B # "state" edges are merged (so there will be an edge from
1690 # |/ # B to B'), the merge base is still ancestor(C, B') in
1690 # |/ # B to B'), the merge base is still ancestor(C, B') in
1691 # A # the merged graph.
1691 # A # the merged graph.
1692 #
1692 #
1693 # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
1693 # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
1694 # which uses "virtual null merge" to explain this situation.
1694 # which uses "virtual null merge" to explain this situation.
1695 if isancestor(p, np):
1695 if isancestor(p, np):
1696 bases[i] = nullrev
1696 bases[i] = nullrev
1697
1697
1698 # If one parent becomes an ancestor of the other, drop the ancestor
1698 # If one parent becomes an ancestor of the other, drop the ancestor
1699 for j, x in enumerate(newps[:i]):
1699 for j, x in enumerate(newps[:i]):
1700 if x == nullrev:
1700 if x == nullrev:
1701 continue
1701 continue
1702 if isancestor(np, x): # CASE-1
1702 if isancestor(np, x): # CASE-1
1703 np = nullrev
1703 np = nullrev
1704 elif isancestor(x, np): # CASE-2
1704 elif isancestor(x, np): # CASE-2
1705 newps[j] = np
1705 newps[j] = np
1706 np = nullrev
1706 np = nullrev
1707 # New parents forming an ancestor relationship does not
1707 # New parents forming an ancestor relationship does not
1708 # mean the old parents have a similar relationship. Do not
1708 # mean the old parents have a similar relationship. Do not
1709 # set bases[x] to nullrev.
1709 # set bases[x] to nullrev.
1710 bases[j], bases[i] = bases[i], bases[j]
1710 bases[j], bases[i] = bases[i], bases[j]
1711
1711
1712 newps[i] = np
1712 newps[i] = np
1713
1713
1714 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1714 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1715 # base. If only p2 changes, merging using unchanged p1 as merge base is
1715 # base. If only p2 changes, merging using unchanged p1 as merge base is
1716 # suboptimal. Therefore swap parents to make the merge sane.
1716 # suboptimal. Therefore swap parents to make the merge sane.
1717 if newps[1] != nullrev and oldps[0] == newps[0]:
1717 if newps[1] != nullrev and oldps[0] == newps[0]:
1718 assert len(newps) == 2 and len(oldps) == 2
1718 assert len(newps) == 2 and len(oldps) == 2
1719 newps.reverse()
1719 newps.reverse()
1720 bases.reverse()
1720 bases.reverse()
1721
1721
1722 # No parent change might be an error because we fail to make rev a
1722 # No parent change might be an error because we fail to make rev a
1723 # descendent of requested dest. This can happen, for example:
1723 # descendent of requested dest. This can happen, for example:
1724 #
1724 #
1725 # C # rebase -r C -d D
1725 # C # rebase -r C -d D
1726 # /| # None of A and B will be changed to D and rebase fails.
1726 # /| # None of A and B will be changed to D and rebase fails.
1727 # A B D
1727 # A B D
1728 if set(newps) == set(oldps) and dest not in newps:
1728 if set(newps) == set(oldps) and dest not in newps:
1729 raise error.Abort(
1729 raise error.Abort(
1730 _(
1730 _(
1731 b'cannot rebase %d:%s without '
1731 b'cannot rebase %d:%s without '
1732 b'moving at least one of its parents'
1732 b'moving at least one of its parents'
1733 )
1733 )
1734 % (rev, repo[rev])
1734 % (rev, repo[rev])
1735 )
1735 )
1736
1736
1737 # Source should not be ancestor of dest. The check here guarantees it's
1737 # Source should not be ancestor of dest. The check here guarantees it's
1738 # impossible. With multi-dest, the initial check does not cover complex
1738 # impossible. With multi-dest, the initial check does not cover complex
1739 # cases since we don't have abstractions to dry-run rebase cheaply.
1739 # cases since we don't have abstractions to dry-run rebase cheaply.
1740 if any(p != nullrev and isancestor(rev, p) for p in newps):
1740 if any(p != nullrev and isancestor(rev, p) for p in newps):
1741 raise error.Abort(_(b'source is ancestor of destination'))
1741 raise error.Abort(_(b'source is ancestor of destination'))
1742
1742
1743 # "rebasenode" updates to new p1, use the corresponding merge base.
1743 # "rebasenode" updates to new p1, use the corresponding merge base.
1744 if bases[0] != nullrev:
1744 if bases[0] != nullrev:
1745 base = bases[0]
1745 base = bases[0]
1746 else:
1746 else:
1747 base = None
1747 base = None
1748
1748
1749 # Check if the merge will contain unwanted changes. That may happen if
1749 # Check if the merge will contain unwanted changes. That may happen if
1750 # there are multiple special (non-changelog ancestor) merge bases, which
1750 # there are multiple special (non-changelog ancestor) merge bases, which
1751 # cannot be handled well by the 3-way merge algorithm. For example:
1751 # cannot be handled well by the 3-way merge algorithm. For example:
1752 #
1752 #
1753 # F
1753 # F
1754 # /|
1754 # /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in set(bases) if b != nullrev) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            siderevs = list(
                repo.revs(b'((%ld-%d) %% (%d+%d))', bases, base, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min(
            (len(revs), i)
            for i, revs in enumerate(unwanted)
            if revs is not None
        )
        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine to not swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(b' or ').join(
                (
                    b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                    for revs in unwanted
                    if revs is not None
                )
            )
            raise error.Abort(
                _(b'rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc)
            )

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base


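# Illustrative, stand-alone sketch (hypothetical helper and values, not part
# of the original module): the (len(revs), i) tuples built in defineparents
# above compare element-wise, so min() picks the merge base that leaks the
# fewest unwanted revisions and breaks ties in favor of the earlier parent.
def _minbase_sketch(unwanted):
    # unwanted[i] is the list of leaked revs if bases[i] is chosen, or None
    return min(
        (len(revs), i) for i, revs in enumerate(unwanted) if revs is not None
    )


# _minbase_sketch([[7, 8], [7]]) == (1, 1): choosing bases[1] leaks a single
# unwanted revision, so it wins over bases[0], which would leak two.

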
def isagitpatch(repo, patchname):
    """Return true if the given patch is in git format"""
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, b'rb')):
        if line.startswith(b'diff --git'):
            return True
    return False


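# Illustrative, stand-alone variant (hypothetical helper, not part of the
# original module): the same "git format" test as isagitpatch() above, but
# over an in-memory sequence of byte lines instead of an on-disk mq patch.
def _isgitpatch_sketch(lines):
    # patches produced with --git start their hunks with "diff --git"
    return any(line.startswith(b'diff --git') for line in lines)


# _isgitpatch_sketch([b'diff --git a/f b/f', b'--- a/f']) is True, while a
# plain-style header such as b'diff -r 0123abcd f' yields False.

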
def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them"""
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug(
                b'revision %d is an mq patch (%s), finalize it.\n'
                % (rev, p.name)
            )
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(
                    _(b'updating mq patch %s to %d:%s\n')
                    % (name, state[rev], repo[state[rev]])
                )
                mq.qimport(
                    repo,
                    (),
                    patchname=name,
                    git=isgit,
                    rev=[b"%d" % state[rev]],
                )
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [
            s
            for s in original_series
            if mq.guard_re.split(s, 1)[0] not in skippedpatches
        ]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()


def storecollapsemsg(repo, collapsemsg):
    """Store the collapse message to allow recovery"""
    collapsemsg = collapsemsg or b''
    f = repo.vfs(b"last-message.txt", b"w")
    f.write(b"%s\n" % collapsemsg)
    f.close()


def clearcollapsemsg(repo):
    """Remove collapse message file"""
    repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)


def restorecollapsemsg(repo, isabort):
    """Restore previously stored collapse message"""
    try:
        f = repo.vfs(b"last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = b''
        else:
            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
    return collapsemsg


def clearstatus(repo):
    """Remove the status files"""
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator(b'rebasestate')
    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)


def needupdate(repo, state):
    '''check whether we should `update --clean` away from a merge, or if
    somehow the working dir got forcibly updated, e.g. by older hg'''
    parents = [p.rev() for p in repo[None].parents()]

    # Are we in a merge state at all?
    if len(parents) < 2:
        return False

    # We should be standing on the first as-of-yet unrebased commit.
    firstunrebased = min(
        [old for old, new in pycompat.iteritems(state) if new == nullrev]
    )
    if firstunrebased in parents:
        return True

    return False


def sortsource(destmap):
    """yield source revisions in an order such that we only rebase things once

    If source and destination overlap, we should filter out revisions
    depending on other revisions which haven't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A onto B and B onto C, this function yields
    [B], then [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    srcset = set(destmap)
    while srcset:
        srclist = sorted(srcset)
        result = []
        for r in srclist:
            if destmap[r] not in srcset:
                result.append(r)
        if not result:
            raise error.Abort(_(b'source and destination form a cycle'))
        srcset -= set(result)
        yield result


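# Illustrative, self-contained sketch (toy integer revisions, hypothetical
# helper): the same batching loop as sortsource() above, on a plain
# {src: dest} dict so it runs without a repository.
def _sortsource_sketch(destmap):
    srcset = set(destmap)
    while srcset:
        # a source is ready once its destination is no longer pending
        batch = sorted(r for r in srcset if destmap[r] not in srcset)
        if not batch:
            raise ValueError('source and destination form a cycle')
        srcset -= set(batch)
        yield batch


# list(_sortsource_sketch({1: 2, 2: 3})) == [[2], [1]]: rebasing 2 onto 3
# first makes the rebased 2 available as a destination for 1, whereas
# {1: 2, 2: 1} raises the cycle error.

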
def buildstate(repo, destmap, collapse):
    '''Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
    '''
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_(b'no matching revisions'))

    # Only check the first batch of revisions to rebase not depending on other
    # rebaseset. This means "source is ancestor of destination" for the second
    # (and following) batches of revisions are not checked here. We rely on
    # "defineparents" to do that check.
    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_(b'no matching revisions'))

    def revof(r):
        return r.rev()

    roots = sorted(roots, key=revof)
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = len(sortedsrc) == 1
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_(b'source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revision
                state[root.rev()] = root.rev()
                repo.ui.debug(b'source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state


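# Illustrative shape of the state mapping built by buildstate() (toy
# revision numbers): every revision to rebase starts at revtodo (-1) and is
# flipped to its new revision number as it is processed, e.g. for
# `hg rebase -s 5 -d 9` over revisions 5..7:
#
#     {5: -1, 6: -1, 7: -1}    # freshly built: everything is revtodo
#     {5: 10, 6: 11, 7: -1}    # mid-rebase: 7 is still pending
#
# A root that already sits on its destination keeps state[root] == root and
# is reported as "source is a child of destination".

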
def clearrebased(
    ui,
    repo,
    destmap,
    state,
    skipped,
    collapsedas=None,
    keepf=False,
    fm=None,
    backup=True,
):
    """dispose of rebased revisions at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).

    If `backup` is False, no backup will be stored when stripping rebased
    revisions.
    """
    tonode = repo.changelog.node
    replacements = {}
    moves = {}
    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)

    collapsednodes = []
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            oldnode = tonode(rev)
            newnode = collapsedas or tonode(newrev)
            moves[oldnode] = newnode
            succs = None
            if rev in skipped:
                if stripcleanup or not repo[rev].obsolete():
                    succs = ()
            elif collapsedas:
                collapsednodes.append(oldnode)
            else:
                succs = (newnode,)
            if succs is not None:
                replacements[(oldnode,)] = succs
    if collapsednodes:
        replacements[tuple(collapsednodes)] = (collapsedas,)
    if fm:
        hf = fm.hexfunc
        fl = fm.formatlist
        fd = fm.formatdict
        changes = {}
        for oldns, newn in pycompat.iteritems(replacements):
            for oldn in oldns:
                changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
        nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
        fm.data(nodechanges=nodechanges)
    if keepf:
        replacements = {}
    scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)


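# Illustrative shape of the `replacements` mapping handed to
# scmutil.cleanupnodes() above (hypothetical node names): keys are tuples
# of old nodes, values are their successor nodes:
#
#     {(a,): (a2,), (b,): ()}    # plain rebase; b was skipped outright
#     {(a, b): (c,)}             # --collapse: both old nodes map to one
#
# With --keep (`keepf`) the dict is emptied first, so only the bookmark
# moves in `moves` take effect.

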
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase"""
    if opts.get('rebase'):
        if ui.configbool(b'commands', b'rebase.requiredest'):
            msg = _(b'rebase destination required by configuration')
            hint = _(b'use hg pull followed by hg rebase -d DEST')
            raise error.Abort(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get('update'):
                del opts['update']
                ui.debug(
                    b'--update and --rebase are not compatible, ignoring '
                    b'the update flag\n'
                )

            cmdutil.checkunfinished(repo, skipmerge=True)
            cmdutil.bailifchanged(
                repo,
                hint=_(
                    b'cannot pull with rebase: '
                    b'please commit or shelve your changes first'
                ),
            )

            revsprepull = len(repo)
            origpostincoming = commands.postincoming

            def _dummy(*args, **kwargs):
                pass

            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # --rev option from pull conflicts with rebase's own --rev,
                # so drop it
                if 'rev' in opts:
                    del opts['rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if 'source' in opts:
                    del opts['source']
                # revsprepull is the len of the repo, not revnum of tip.
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts['_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo[b'.'].rev():
                        ui.status(_(b'nothing to rebase\n'))
                    else:
                        ui.status(_(b'nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get('tool'):
            raise error.Abort(_(b'--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret


def _filterobsoleterevs(repo, revs):
    """returns a set of the obsolete revisions in revs"""
    return set(r for r in revs if repo[r].obsolete())


def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
    """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination,
    obsoleteextinctsuccessors).

    `obsoletenotrebased` is a mapping obsolete => successor for all
    obsolete nodes to be rebased given in `rebaseobsrevs`.

    `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
    without a successor in destination.

    `obsoleteextinctsuccessors` is a set of obsolete revisions with only
    obsolete successors.
    """
    obsoletenotrebased = {}
    obsoletewithoutsuccessorindestination = set()
    obsoleteextinctsuccessors = set()

    assert repo.filtername is None
    cl = repo.changelog
    get_rev = cl.index.get_rev
    extinctrevs = set(repo.revs(b'extinct()'))
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        # XXX: more advanced APIs are required to handle split correctly
        successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        # obsutil.allsuccessors includes node itself
        successors.remove(srcnode)
        succrevs = {get_rev(s) for s in successors}
        succrevs.discard(None)
        if succrevs.issubset(extinctrevs):
            # all successors are extinct
            obsoleteextinctsuccessors.add(srcrev)
        if not successors:
            # no successor
            obsoletenotrebased[srcrev] = None
        else:
            dstrev = destmap[srcrev]
            for succrev in succrevs:
                if cl.isancestorrev(succrev, dstrev):
                    obsoletenotrebased[srcrev] = succrev
                    break
            else:
                # If 'srcrev' has a successor in rebase set but none in
                # destination (which would be caught above), we shall skip it
                # and its descendants to avoid divergence.
                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                    obsoletewithoutsuccessorindestination.add(srcrev)

    return (
        obsoletenotrebased,
        obsoletewithoutsuccessorindestination,
        obsoleteextinctsuccessors,
    )


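# Illustrative classification (hypothetical revision X slated for rebase):
#
#     X has no successors at all        -> obsoletenotrebased[X] = None
#     some successor is already an      -> obsoletenotrebased[X] = that
#       ancestor of destmap[X]             successor (X can be skipped)
#     X is extinct, or its successors   -> obsoletewithoutsuccessorindestination
#       live only inside the rebase set    (skip X and its descendants)
#     every successor is extinct        -> also obsoleteextinctsuccessors

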
def abortrebase(ui, repo):
    with repo.wlock(), repo.lock():
        rbsrt = rebaseruntime(repo, ui)
        rbsrt._prepareabortorcontinue(isabort=True)


def continuerebase(ui, repo):
    with repo.wlock(), repo.lock():
        rbsrt = rebaseruntime(repo, ui)
        ms = mergemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)
        retcode = rbsrt._prepareabortorcontinue(isabort=False)
        if retcode is not None:
            return retcode
        rbsrt._performrebase(None)
        rbsrt._finishrebase()


def summaryhook(ui, repo):
    if not repo.vfs.exists(b'rebasestate'):
        return
    try:
        rbsrt = rebaseruntime(repo, ui, {})
        rbsrt.restorestatus()
        state = rbsrt.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
    # i18n: column positioning for "hg summary"
    ui.write(
        _(b'rebase: %s, %s (rebase --continue)\n')
        % (
            ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
            ui.label(_(b'%d remaining'), b'rebase.remaining')
            % (len(state) - numrebased),
        )
    )


def uisetup(ui):
    # Replace pull with a decorator to provide --rebase option
    entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
    entry[1].append(
        (b'', b'rebase', None, _(b"rebase working directory to branch head"))
    )
    entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
    cmdutil.summaryhooks.add(b'rebase', summaryhook)
    statemod.addunfinished(
        b'rebase',
        fname=b'rebasestate',
        stopflag=True,
        continueflag=True,
        abortfunc=abortrebase,
        continuefunc=continuerebase,
    )
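
# Illustrative effect of the wrapping above: once uisetup() has run,
# `hg pull --rebase [-t TOOL]` pulls and then rebases in one step (a bare
# `hg pull` is unaffected), and `hg summary` gains a "rebase:" line
# whenever a rebasestate file exists.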
@@ -1,3027 +1,3030 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    copies,
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache


class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

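    # Illustrative reading of the mf1.diff(mf2) entries consumed in
    # _buildstatus() above (hypothetical values): each entry is fn -> None
    # for a clean file (only reported when listclean is set), or
    # fn -> ((node1, flag1), (node2, flag2)) where
    #
    #     node1 is None    -> fn was added
    #     node2 is None    -> fn was removed
    #     flag1 != flag2   -> modified (an exec/link flag flip counts)
    #
    # and anything else falls through to the nodeid/content comparison.
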
    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
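
    # Illustrative effect of the `reversed` branch above (hypothetical file
    # names): if comparing the working directory against its parent yields
    # added={'f'}, removed={'g'}, then parentctx.status(wdirctx) reports
    # added={'g'}, removed={'f'} and clears deleted/unknown/ignored, which
    # only make sense relative to the working directory.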
476
476
477
477
478 class changectx(basectx):
478 class changectx(basectx):
479 """A changecontext object makes access to data related to a particular
479 """A changecontext object makes access to data related to a particular
480 changeset convenient. It represents a read-only context already present in
480 changeset convenient. It represents a read-only context already present in
481 the repo."""
481 the repo."""
482
482
483 def __init__(self, repo, rev, node, maybe_filtered=True):
483 def __init__(self, repo, rev, node, maybe_filtered=True):
484 super(changectx, self).__init__(repo)
484 super(changectx, self).__init__(repo)
485 self._rev = rev
485 self._rev = rev
486 self._node = node
486 self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering, so operations must go through the filtered
        # changelog.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operations may go through
        # the unfiltered changelog in some cases.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
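
    # A sketch of how the copy-source configuration above resolves (the same
    # decision is repeated in filesremoved() and _copies below):
    #   changeset-sidedata / changeset-only -> trust the changelog data;
    #       missing data means "nothing recorded"
    #   compatibility -> prefer the changelog data, recompute from the
    #       filelogs when it is absent
    #   any other value (filelog mode) -> ignore the changelog data and
    #       always recompute from the filelogs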

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from the changeset,
            # then return that, defaulting to {} if there was no copy
            # metadata. In compatibility mode, we return copy data from the
            # changeset if it was recorded there, and otherwise we fall back
            # to getting it from the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when the config says to read only from the filelog,
            # we get the copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
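        # commonancestorsheads() may return several "best" common ancestors
        # (typically after criss-cross merges); the merge.preferancestor
        # handling below picks one of them.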
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
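            # (Illustration: filelog copy metadata is framed as
            # '\1\n<metadata>\1\n' in front of the file data, and an empty
            # metadata block is exactly the 4-byte frame '\1\n\1\n', hence
            # the size() - 4 comparison above.)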
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no
                  introduction of this file content could be found before
                  this floor revision, the function will return None and
                  stop its iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a
            # result. But if the manifest uses a buggy file revision (not a
            # child of the one it replaces), we could. Such a buggy
            # situation will likely result in a crash somewhere else at
            # some point.
        return lkr

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision"""
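        # Fast path: linkrev() is the lowest changelog revision carrying
        # this file revision, so any (possibly adjusted) introduction
        # revision can only be greater than or equal to it.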
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev`, but with an extra argument to limit the changelog
        iteration range in some internal use cases.

        If `stoprev` is set, the `introrev` will not be searched for past
        that `stoprev` revision and "None" might be returned. This is useful
        to limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have always been filtered out by the list
            # comprehension above, inserting at index 0 will always result
            # in replacing the first nullid parent with the rename
            # information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
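        # fall back to a null filectx standing in for the missing second
        # parent (fileid=-1 should resolve to nullid in the filelog)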
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

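        # Walk backwards from this filectx, always expanding the pending
        # candidate with the highest (linkrev, filenode) key, so ancestors
        # are yielded newest-first; with followfirst, only the first parent
        # of each revision is followed (parents()[:1]).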
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())


class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such cases we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than a crash.
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solutions to the linkrev issue are on the
            # table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if the file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
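        # If either parent already carries this exact file revision, the
        # rename happened in an ancestor changeset and is not reported as a
        # copy for this one.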
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]


class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
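        # A pending commit takes the most restrictive (highest-numbered)
        # phase among the configured new-commit phase and the phases of its
        # parents (public=0 < draft=1 < secret=2).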
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

1471 Specifically, this updates backing stores this working context
1471 Specifically, this updates backing stores this working context
1472 wraps to reflect the fact that the changes reflected by this
1472 wraps to reflect the fact that the changes reflected by this
1473 workingctx have been committed. For example, it marks
1473 workingctx have been committed. For example, it marks
1474 modified and added files as normal in the dirstate.
1474 modified and added files as normal in the dirstate.
1475
1475
1476 """
1476 """
1477
1477
1478 def dirty(self, missing=False, merge=True, branch=True):
1478 def dirty(self, missing=False, merge=True, branch=True):
1479 return False
1479 return False
1480
1480
1481
1481
1482 class workingctx(committablectx):
1482 class workingctx(committablectx):
1483 """A workingctx object makes access to data related to
1483 """A workingctx object makes access to data related to
1484 the current working directory convenient.
1484 the current working directory convenient.
1485 date - any valid date string or (unixtime, offset), or None.
1485 date - any valid date string or (unixtime, offset), or None.
1486 user - username string, or None.
1486 user - username string, or None.
1487 extra - a dictionary of extra values, or None.
1487 extra - a dictionary of extra values, or None.
1488 changes - a list of file lists as returned by localrepo.status()
1488 changes - a list of file lists as returned by localrepo.status()
1489 or None to use the repository status.
1489 or None to use the repository status.
1490 """
1490 """
1491
1491
1492 def __init__(
1492 def __init__(
1493 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1493 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1494 ):
1494 ):
1495 branch = None
1495 branch = None
1496 if not extra or b'branch' not in extra:
1496 if not extra or b'branch' not in extra:
1497 try:
1497 try:
1498 branch = repo.dirstate.branch()
1498 branch = repo.dirstate.branch()
1499 except UnicodeDecodeError:
1499 except UnicodeDecodeError:
1500 raise error.Abort(_(b'branch name not in UTF-8!'))
1500 raise error.Abort(_(b'branch name not in UTF-8!'))
1501 super(workingctx, self).__init__(
1501 super(workingctx, self).__init__(
1502 repo, text, user, date, extra, changes, branch=branch
1502 repo, text, user, date, extra, changes, branch=branch
1503 )
1503 )
1504
1504
1505 def __iter__(self):
1505 def __iter__(self):
1506 d = self._repo.dirstate
1506 d = self._repo.dirstate
1507 for f in d:
1507 for f in d:
1508 if d[f] != b'r':
1508 if d[f] != b'r':
1509 yield f
1509 yield f
1510
1510
1511 def __contains__(self, key):
1511 def __contains__(self, key):
1512 return self._repo.dirstate[key] not in b"?r"
1512 return self._repo.dirstate[key] not in b"?r"
1513
1513
1514 def hex(self):
1514 def hex(self):
1515 return wdirhex
1515 return wdirhex
1516
1516
1517 @propertycache
1517 @propertycache
1518 def _parents(self):
1518 def _parents(self):
1519 p = self._repo.dirstate.parents()
1519 p = self._repo.dirstate.parents()
1520 if p[1] == nullid:
1520 if p[1] == nullid:
1521 p = p[:-1]
1521 p = p[:-1]
1522 # use unfiltered repo to delay/avoid loading obsmarkers
1522 # use unfiltered repo to delay/avoid loading obsmarkers
1523 unfi = self._repo.unfiltered()
1523 unfi = self._repo.unfiltered()
1524 return [
1524 return [
1525 changectx(
1525 changectx(
1526 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1526 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1527 )
1527 )
1528 for n in p
1528 for n in p
1529 ]
1529 ]
1530
1530
1531 def _fileinfo(self, path):
1531 def _fileinfo(self, path):
1532 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1532 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1533 self._manifest
1533 self._manifest
1534 return super(workingctx, self)._fileinfo(path)
1534 return super(workingctx, self)._fileinfo(path)
1535
1535
1536 def _buildflagfunc(self):
1536 def _buildflagfunc(self):
1537 # Create a fallback function for getting file flags when the
1537 # Create a fallback function for getting file flags when the
1538 # filesystem doesn't support them
1538 # filesystem doesn't support them
1539
1539
1540 copiesget = self._repo.dirstate.copies().get
1540 copiesget = self._repo.dirstate.copies().get
1541 parents = self.parents()
1541 parents = self.parents()
1542 if len(parents) < 2:
1542 if len(parents) < 2:
1543 # when we have one parent, it's easy: copy from parent
1543 # when we have one parent, it's easy: copy from parent
1544 man = parents[0].manifest()
1544 man = parents[0].manifest()
1545
1545
1546 def func(f):
1546 def func(f):
1547 f = copiesget(f, f)
1547 f = copiesget(f, f)
1548 return man.flags(f)
1548 return man.flags(f)
1549
1549
1550 else:
1550 else:
1551 # merges are tricky: we try to reconstruct the unstored
1551 # merges are tricky: we try to reconstruct the unstored
1552 # result from the merge (issue1802)
1552 # result from the merge (issue1802)
1553 p1, p2 = parents
1553 p1, p2 = parents
1554 pa = p1.ancestor(p2)
1554 pa = p1.ancestor(p2)
1555 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1555 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1556
1556
1557 def func(f):
1557 def func(f):
1558 f = copiesget(f, f) # may be wrong for merges with copies
1558 f = copiesget(f, f) # may be wrong for merges with copies
1559 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1559 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1560 if fl1 == fl2:
1560 if fl1 == fl2:
1561 return fl1
1561 return fl1
1562 if fl1 == fla:
1562 if fl1 == fla:
1563 return fl2
1563 return fl2
1564 if fl2 == fla:
1564 if fl2 == fla:
1565 return fl1
1565 return fl1
1566 return b'' # punt for conflicts
1566 return b'' # punt for conflicts
1567
1567
1568 return func
1568 return func
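
    # Editorial sketch (not part of the original module): the three-way flag
    # reconciliation above follows the usual merge rule "take the side that
    # changed relative to the ancestor".  With fla the ancestor's flag:
    #
    #     fl1    fl2    fla    result
    #     'x'    'x'    ''     'x'   (parents agree)
    #     'x'    ''     ''     'x'   (only p1 changed -> keep p1)
    #     ''     'l'    ''     'l'   (only p2 changed -> keep p2)
    #     'x'    'l'    ''     ''    (both changed differently -> punt)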

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )

    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified"""
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )
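
    # Editorial sketch (not part of the original module): callers usually get
    # here through repo[None], the conventional handle for the working
    # directory context:
    #
    #     wctx = repo[None]
    #     if wctx.dirty(missing=True):
    #         raise error.Abort(_(b'uncommitted changes'))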

    def add(self, list, prefix=b""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
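
    # Editorial sketch (not part of the original module): the RAM warning
    # above is driven by a bytes-valued config option, e.g. in an hgrc:
    #
    #     [ui]
    #     large-file-limit = 10MB
    #
    # ui.configbytes() understands size suffixes, and a value of 0 disables
    # the check entirely.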

    def forget(self, files, prefix=b""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == b'l':
                d = self[f].data()
                if (
                    d == b''
                    or len(d) >= 1024
                    or b'\n' in d
                    or stringutil.binary(d)
                ):
                    self._repo.ui.debug(
                        b'ignoring suspect symlink placeholder "%s"\n' % f
                    )
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # Did the file become inaccessible in the meantime? Mark it
                # as deleted, matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
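
    # Editorial sketch (not part of the original module): the dirstate copy
    # map is keyed by destination -- after `hg copy a b` it contains
    # {b'b': b'a'} -- and the loop above files each record under p1copies or
    # p2copies depending on which parent manifest actually contains the
    # source, so copies whose source only exists in the second parent (e.g.
    # during a merge) land in p2copies.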

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in (
            (addednodeid, status.added),
            (modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != b'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)


class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        return []


class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)


class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )
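
    # Editorial sketch (not part of the original module): the two directions
    # checked above correspond to the two ways a path conflict can arise in
    # an in-memory merge.  Suppose p1 tracks a regular file b'a':
    #
    #     wctx._auditconflicts(b'a/foo')   # aborts: b'a' is a file in p1
    #
    # and conversely, if p1 tracks b'd/f', writing b'd' itself aborts,
    # because b'd/' is a directory in p1.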

    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)
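
    # Editorial sketch (not part of the original module): because a symlink's
    # "data" in Mercurial is its target path, following a dirty symlink is
    # just a recursive lookup on its data, e.g.:
    #
    #     wctx.write(b'link', b'target', flags=b'l')
    #     wctx.exists(b'link')   # equivalent to wctx.exists(b'target')
    #
    # so a link whose target was removed in the overlay reports False.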

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )
2460
2463
    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()),
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )


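# Illustrative usage sketch (not part of the original module): commit an
# in-memory edit through an overlayworkingctx. It assumes ``overlay`` was
# set up earlier with ``overlayworkingctx(repo)`` plus ``setbase()``, as
# the in-memory rebase code does.
def _example_overlay_commit(repo, overlay):
    # Stage a write in the overlay's cache; the working directory and
    # dirstate are never touched.
    overlay.write(b'data.txt', b'new contents\n')
    # Convert the staged state into a memctx; ``branch`` now defaults to
    # the base context's branch when not passed explicitly.
    mctx = overlay.tomemctx(b'example commit', user=b'someone@example.com')
    return repo.commitctx(mctx)

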
class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx``, but intercepts all writes into an in-memory
    cache, which can be flushed later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass


class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed


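# Illustrative sketch (assumption: ``repo`` is a localrepo and ``files``
# names the subset of changed files being committed): status queries on
# the resulting context only report the files that are part of this
# commit, hiding other working-directory changes.
def _example_partial_commit_ctx(repo, files):
    changes = repo.status(match=scmutil.matchfiles(repo, files))
    return workingcommitctx(repo, changes, text=b'partial commit')

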
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx


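# Illustrative sketch: memfilefromctx (below) returns a plain filectxfn;
# wrapping it with makecachingfilectxfn means repeated lookups of the same
# path during a commit read the underlying file data only once.
def _example_cached_filectxfn(ctx):
    return makecachingfilectxfn(memfilefromctx(ctx))

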
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx


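# Illustrative sketch (``newparentnode`` is assumed to be a changelog node):
# replay a changeset's file contents on top of a different parent by using
# memfilefromctx(ctx) as the filectxfn of a new memctx. This is naive on
# purpose: file contents are taken verbatim from ``ctx``, with no merging.
def _example_replay_commit(repo, ctx, newparentnode):
    return memctx(
        repo,
        (newparentnode, None),
        ctx.description(),
        ctx.files(),
        memfilefromctx(ctx),
        user=ctx.user(),
        date=ctx.date(),
    )

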
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx


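# Illustrative sketch: memctx accepts a patch.filestore directly and wraps
# it with memfilefrompatch internally, so a parsed patch can be committed
# in memory. ``store`` and ``files`` are assumed to come from the patching
# machinery (e.g. a patch.filestore populated while applying a patch).
def _example_commit_patchstore(repo, store, files, message):
    return memctx(repo, (repo[b'.'].node(), None), message, files, store)

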
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order
    is undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])


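# Illustrative sketch (assuming ``repo``): create a one-file changeset
# entirely in memory, without touching the working directory. The file
# name and content are made up for the example.
def _example_memctx_commit(repo):
    def getfilectx(repo, memctx_, path):
        return memfilectx(repo, memctx_, path, b'hello\n')

    mctx = memctx(
        repo,
        (repo[b'.'].node(), None),
        b'add hello.txt in memory',
        [b'hello.txt'],
        getfilectx,
    )
    return mctx.commit()

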
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = b'l'
        elif isexec:
            self._flags = b'x'
        else:
            self._flags = b''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx, but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])


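# Illustrative sketch (assuming ``repo``): reword a changeset's description
# without reading any file data, by reusing its manifest through
# metadataonlyctx. Marking the old changeset obsolete or stripping it is
# out of scope for this example.
def _example_reword(repo, ctx, newtext):
    return metadataonlyctx(repo, ctx, text=newtext).commit()

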
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = b'l' in self.flags() or b'l' in fctx.flags()
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
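

# Illustrative sketch: compare an arbitrary on-disk file (e.g. a merge
# temp file) against a committed file through the filectx-style API.
# ``tmppath`` is assumed to be a plain filesystem path.
def _example_compare_to_committed(repo, tmppath, ctx, path):
    afctx = arbitraryfilectx(tmppath, repo=repo)
    # cmp() returns True when the contents differ.
    return afctx.cmp(ctx[path])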