overlayworkingctx: rename misleadingly named `isempty()` method...
Manuel Jacob
r45647:83f75f1e default
@@ -1,2250 +1,2252 @@
# rebase.py - rebasing feature for mercurial
#
# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''command to move sets of revisions to a different ancestor

This extension lets you rebase changesets in an existing Mercurial
repository.

For more information:
https://mercurial-scm.org/wiki/RebaseExtension
'''

from __future__ import absolute_import

import errno
import os

from mercurial.i18n import _
from mercurial.node import (
    nullrev,
    short,
)
from mercurial.pycompat import open
from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    copies,
    destutil,
    dirstateguard,
    error,
    extensions,
    hg,
    merge as mergemod,
    mergestate as mergestatemod,
    mergeutil,
    node as nodemod,
    obsolete,
    obsutil,
    patch,
    phases,
    pycompat,
    registrar,
    repair,
    revset,
    revsetlang,
    rewriteutil,
    scmutil,
    smartset,
    state as statemod,
    util,
)

# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
revtodostr = b'-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {b'-2', b'-3', b'-4', b'-5'}

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'


def _nothingtorebase():
    return 1


def _savegraft(ctx, extra):
    s = ctx.extra().get(b'source', None)
    if s is not None:
        extra[b'source'] = s
    s = ctx.extra().get(b'intermediate-source', None)
    if s is not None:
        extra[b'intermediate-source'] = s


def _savebranch(ctx, extra):
    extra[b'branch'] = ctx.branch()


def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    return destutil.destmerge(
        repo,
        action=b'rebase',
        sourceset=sourceset,
        onheadcheck=False,
        destspace=destspace,
    )


revsetpredicate = registrar.revsetpredicate()


@revsetpredicate(b'_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    sourceset = None
    if x is not None:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])


@revsetpredicate(b'_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    # ``_destautoorphanrebase()``

    # automatic rebase destination for a single orphan revision.
    unfi = repo.unfiltered()
    obsoleted = unfi.revs(b'obsolete()')

    src = revset.getset(repo, subset, x).first()

    # Empty src or already obsoleted - Do not return a destination
    if not src or src in obsoleted:
        return smartset.baseset()
    dests = destutil.orphanpossibledestination(repo, src)
    if len(dests) > 1:
        raise error.Abort(
            _(b"ambiguous automatic rebase: %r could end up on any of %r")
            % (src, dests)
        )
    # We have zero or one destination, so we can just return here.
    return smartset.baseset(dests)


def _ctxdesc(ctx):
    """short description for a context"""
    desc = b'%d:%s "%s"' % (
        ctx.rev(),
        ctx,
        ctx.description().split(b'\n', 1)[0],
    )
    repo = ctx.repo()
    names = []
    for nsname, ns in pycompat.iteritems(repo.names):
        if nsname == b'branches':
            continue
        names.extend(ns.names(repo, ctx.node()))
    if names:
        desc += b' (%s)' % b' '.join(names)
    return desc
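
# As an illustration (hypothetical revision, hash, and bookmark name),
# _ctxdesc() returns byte strings of the form:
#   42:2327fea05063 "fix frobnicator" (feature-bookmark)
# where the parenthesized names come from every name namespace except
# branches.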


class rebaseruntime(object):
    """This class is a container for rebase runtime state"""

    def __init__(self, repo, ui, inmemory=False, opts=None):
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self.resume = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping from each old revision id to either its new rebased revision
        # or a code for what needs to be done with the old revision. This
        # state dict contains most of the rebase progress state.
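        # For example (illustrative values only), {4: -1, 5: 7} means that
        # revision 4 still needs to be rebased (revtodo == -1) while
        # revision 5 has already been rebased as revision 7.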
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get(b'collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get(b'date', None)

        e = opts.get(b'extrafn')  # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
        self.keepf = opts.get(b'keep', False)
        self.keepbranchesf = opts.get(b'keepbranches', False)
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        self.stateobj = statemod.cmdstate(repo, b'rebasestate')

    @property
    def repo(self):
        if self.prepared:
            return self._repo.unfiltered()
        else:
            return self._repo

    def storestatus(self, tr=None):
        """Store the current status to allow recovery"""
        if tr:
            tr.addfilegenerator(
                b'rebasestate',
                (b'rebasestate',),
                self._writestatus,
                location=b'plain',
            )
        else:
            with self.repo.vfs(b"rebasestate", b"w") as f:
                self._writestatus(f)

    def _writestatus(self, f):
        repo = self.repo
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + b'\n')
        # this slot used to hold the single "dest"; we now write a dest per
        # src root below.
        f.write(b'\n')
        f.write(repo[self.external].hex() + b'\n')
        f.write(b'%d\n' % int(self.collapsef))
        f.write(b'%d\n' % int(self.keepf))
        f.write(b'%d\n' % int(self.keepbranchesf))
        f.write(b'%s\n' % (self.activebookmark or b''))
        destmap = self.destmap
        for d, v in pycompat.iteritems(self.state):
            oldrev = repo[d].hex()
            if v >= 0:
                newrev = repo[v].hex()
            else:
                newrev = b"%d" % v
            destnode = repo[destmap[d]].hex()
            f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug(b'rebase status stored\n')
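
    # A sketch of the .hg/rebasestate layout produced by _writestatus() above
    # and parsed back by _read() below (hashes abbreviated here; the real
    # file stores full 40-character hex nodes):
    #
    #   d2ae7f538514...                      originalwd
    #   <empty line>                         legacy single-dest slot
    #   f585351a92f8...                      external
    #   0                                    collapse flag
    #   0                                    keep flag
    #   0                                    keepbranches flag
    #   <active bookmark, possibly empty>
    #   <oldrev>:<newrev>:<destnode>         one line per rebased revision;
    #                                        <newrev> may be "-1" (revtodo)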

    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))

        data = self._read()
        self.repo.ui.debug(b'rebase status resumed\n')

        self.originalwd = data[b'originalwd']
        self.destmap = data[b'destmap']
        self.state = data[b'state']
        self.skipped = data[b'skipped']
        self.collapsef = data[b'collapse']
        self.keepf = data[b'keep']
        self.keepbranchesf = data[b'keepbranches']
        self.external = data[b'external']
        self.activebookmark = data[b'activebookmark']

    def _read(self):
        self.prepared = True
        repo = self.repo
        assert repo.filtername is None
        data = {
            b'keepbranches': None,
            b'collapse': None,
            b'activebookmark': None,
            b'external': nullrev,
            b'keep': None,
            b'originalwd': None,
        }
        legacydest = None
        state = {}
        destmap = {}

        with repo.vfs(b"rebasestate") as f:
            for i, l in enumerate(f.read().splitlines()):
                if i == 0:
                    data[b'originalwd'] = repo[l].rev()
                elif i == 1:
                    # this line should be empty in newer versions, but legacy
                    # clients may still write it
                    if l:
                        legacydest = repo[l].rev()
                elif i == 2:
                    data[b'external'] = repo[l].rev()
                elif i == 3:
                    data[b'collapse'] = bool(int(l))
                elif i == 4:
                    data[b'keep'] = bool(int(l))
                elif i == 5:
                    data[b'keepbranches'] = bool(int(l))
                elif i == 6 and not (len(l) == 81 and b':' in l):
                    # line 6 is a recent addition, so for backwards
                    # compatibility check that the line doesn't look like the
                    # oldrev:newrev lines
                    data[b'activebookmark'] = l
                else:
                    args = l.split(b':')
                    oldrev = repo[args[0]].rev()
                    newrev = args[1]
                    if newrev in legacystates:
                        continue
                    if len(args) > 2:
                        destrev = repo[args[2]].rev()
                    else:
                        destrev = legacydest
                    destmap[oldrev] = destrev
                    if newrev == revtodostr:
                        state[oldrev] = revtodo
                        # Legacy compat special case
                    else:
                        state[oldrev] = repo[newrev].rev()

        if data[b'keepbranches'] is None:
            raise error.Abort(_(b'.hg/rebasestate is incomplete'))

        data[b'destmap'] = destmap
        data[b'state'] = state
        skipped = set()
        # recompute the set of skipped revs
        if not data[b'collapse']:
            seen = set(destmap.values())
            for old, new in sorted(state.items()):
                if new != revtodo and new in seen:
                    skipped.add(old)
                seen.add(new)
        data[b'skipped'] = skipped
        repo.ui.debug(
            b'computed skipped revs: %s\n'
            % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
        )

        return data

    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (
            self.obsoletenotrebased,
            self.obsoletewithoutsuccessorindestination,
            obsoleteextinctsuccessors,
        ) = _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
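
    # Shape note, inferred from how these structures are used in
    # _rebasenode() below: self.obsoletenotrebased maps an obsolete source
    # revision to the successor revision already present in the destination,
    # or to None when the revision has no successor at all.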

    def _prepareabortorcontinue(
        self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
    ):
        self.resume = True
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(
                    _(
                        b'rebase aborted (no revision is removed,'
                        b' only broken state is cleared)\n'
                    )
                )
                return 0
            else:
                msg = _(b'cannot continue inconsistent rebase')
                hint = _(b'use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)

        if isabort:
            backup = backup and self.backupf
            return self._abort(
                backup=backup,
                suppwarns=suppwarns,
                dryrun=dryrun,
                confirm=confirm,
            )

    def _preparenewrebase(self, destmap):
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        if not self.keepf:
            try:
                rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
            except error.Abort as e:
                if e.hint is None:
                    e.hint = _(b'use --keep to keep original changesets')
                raise e

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_(b'nothing to rebase\n'))
            return _nothingtorebase()

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _(b'--collapse does not work with multiple destinations')
                )
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors(
                [destrev], inclusive=True
            )
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_(b'reopening closed branch head %s\n') % dest)

        self.prepared = True

    def _assignworkingcopy(self):
        if self.inmemory:
            from mercurial.context import overlayworkingctx

            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug(b"rebasing in-memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug(b"rebasing on disk\n")
        self.repo.ui.log(
            b"rebase",
            b"using in-memory rebase: %r\n",
            self.inmemory,
            rebase_imm_used=self.inmemory,
        )
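
    # Note: overlayworkingctx is the in-memory counterpart of the on-disk
    # working copy (repo[None]); it buffers file changes in memory so that
    # the working directory is left untouched until the rebased revisions
    # are committed.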

    def _performrebase(self, tr):
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(
                        _(b'cannot collapse multiple named branches')
                    )

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
        p = repo.ui.makeprogress(
            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
        )

        def progress(ctx):
            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))

        allowdivergence = self.ui.configbool(
            b'experimental', b'evolution.allowdivergence'
        )
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
            if not allowdivergence:
                sortedrevs -= self.repo.revs(
                    b'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))

    def _concludenode(self, rev, p1, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision.'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        extra = {b'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        destphase = max(ctx.phase(), phases.draft)
        overrides = {(b'phases', b'new-commit'): destphase}
        with repo.ui.configoverride(overrides, b'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(
                    repo,
                    wctx=self.wctx,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )
                mergestatemod.mergestate.clean(repo)
            else:
                newnode = commitnode(
                    repo,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )

        return newnode

    def _rebasenode(self, tr, rev, allowdivergence, progressfn):
        repo, ui, opts = self.repo, self.ui, self.opts
        dest = self.destmap[rev]
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            ui.status(_(b'already rebased %s\n') % desc)
        elif (
            not allowdivergence
            and rev in self.obsoletewithoutsuccessorindestination
        ):
            msg = (
                _(
                    b'note: not rebasing %s and its descendants as '
                    b'this would cause divergence\n'
                )
                % desc
            )
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsoletenotrebased:
            succ = self.obsoletenotrebased[rev]
            if succ is None:
                msg = _(b'note: not rebasing %s, it has no successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = _(
                    b'note: not rebasing %s, already in destination as %s\n'
                ) % (desc, succdesc)
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(
                adjustdest(repo, rev, self.destmap, self.state, self.skipped)
            )
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_(b'rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(
                repo,
                rev,
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            if self.resume and self.wctx.p1().rev() == p1:
                repo.ui.debug(b'resuming interrupted rebase\n')
                self.resume = False
            else:
                overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
                with ui.configoverride(overrides, b'rebase'):
                    stats = rebasenode(
                        repo,
                        rev,
                        p1,
                        p2,
                        base,
                        self.collapsef,
                        dest,
                        wctx=self.wctx,
                    )
                    if stats.unresolvedcount > 0:
                        if self.inmemory:
                            raise error.InMemoryMergeConflictsError()
                        else:
                            raise error.InterventionRequired(
                                _(
                                    b'unresolved conflicts (see hg '
                                    b'resolve, then hg rebase --continue)'
                                )
                            )
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, b'rebase')
                editor = cmdutil.getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
                # We need to set parents again here just in case we're continuing
                # a rebase started with an old hg version (before 9c9cfecd4600),
                # because those old versions would have left us with two dirstate
                # parents, and we don't want to create a merge commit here (unless
                # we're rebasing a merge commit).
                self.wctx.setparents(repo[p1].node(), repo[p2].node())
                newnode = self._concludenode(rev, p1, editor)
            else:
                # Skip commit if we are collapsing
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug(b'rebased as %s\n' % short(newnode))
            else:
                if not self.collapsef:
                    ui.warn(
                        _(
                            b'note: not rebasing %s, its destination already '
                            b'has all its changes\n'
                        )
                        % desc
                    )
                    self.skipped.add(rev)
                self.state[rev] = p1
                ui.debug(b'next revision set to %d\n' % p1)
        else:
            ui.status(
                _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
            )
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()

    def _finishrebase(self):
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter(b'rebase', opts)
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(
                repo,
                min(self.state),
                self.destmap,
                self.state,
                self.skipped,
                self.obsoletenotrebased,
            )
            editopt = opts.get(b'edit')
            editform = b'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                commitmsg = b'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += b'\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            self.wctx.setparents(repo[p1].node(), repo[self.external].node())
            newnode = self._concludenode(
                revtoreuse, p1, editor, commitmsg=commitmsg
            )

            if newnode is not None:
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if b'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **pycompat.strkwargs(opts))

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_(b"update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, overwrite=False)

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(
            ui,
            repo,
            self.destmap,
            self.state,
            self.skipped,
            collapsedas,
            self.keepf,
            fm=fm,
            backup=self.backupf,
        )

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_(b"rebase completed\n"))
        util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
        fm.end()

        if (
            self.activebookmark
            and self.activebookmark in repo._bookmarks
            and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
        ):
            bookmarks.activate(repo, self.activebookmark)

    def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
        '''Restore the repository to its original state.'''

        repo = self.repo
        try:
            # If the first commits in the rebased set get skipped during the
            # rebase, their values within the state mapping will be the dest
            # rev id. The rebased list must not contain the dest rev
            # (issue4896)
            rebased = [
                s
                for r, s in self.state.items()
                if s >= 0 and s != r and s != self.destmap[r]
            ]
            immutable = [d for d in rebased if not repo[d].mutable()]
            cleanup = True
            if immutable:
                repo.ui.warn(
                    _(b"warning: can't clean up public changesets %s\n")
                    % b', '.join(bytes(repo[r]) for r in immutable),
                    hint=_(b"see 'hg help phases' for details"),
                )
                cleanup = False

            descendants = set()
            if rebased:
                descendants = set(repo.changelog.descendants(rebased))
            if descendants - set(rebased):
                repo.ui.warn(
                    _(
                        b"warning: new changesets detected on "
                        b"destination branch, can't strip\n"
                    )
                )
                cleanup = False

            if cleanup:
                if rebased:
                    strippoints = [
                        c.node() for c in repo.set(b'roots(%ld)', rebased)
                    ]

                updateifonnodes = set(rebased)
                updateifonnodes.update(self.destmap.values())

                if not dryrun and not confirm:
                    updateifonnodes.add(self.originalwd)

                shouldupdate = repo[b'.'].rev() in updateifonnodes

                # Update away from the rebase if necessary
                if shouldupdate:
                    mergemod.clean_update(repo[self.originalwd])

                # Strip from the first rebased revision
                if rebased:
                    repair.strip(repo.ui, repo, strippoints, backup=backup)

            if self.activebookmark and self.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, self.activebookmark)

        finally:
            clearstatus(repo)
            clearcollapsemsg(repo)
            if not suppwarns:
                repo.ui.warn(_(b'rebase aborted\n'))
        return 0


@command(
    b'rebase',
    [
        (
            b's',
            b'source',
            [],
            _(b'rebase the specified changesets and their descendants'),
            _(b'REV'),
        ),
        (
            b'b',
            b'base',
            [],
            _(b'rebase everything from branching point of specified changeset'),
            _(b'REV'),
        ),
        (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
        (
            b'd',
            b'dest',
            b'',
            _(b'rebase onto the specified changeset'),
            _(b'REV'),
        ),
        (b'', b'collapse', False, _(b'collapse the rebased changesets')),
        (
            b'm',
            b'message',
            b'',
            _(b'use text as collapse commit message'),
            _(b'TEXT'),
        ),
        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
        (
            b'l',
            b'logfile',
            b'',
            _(b'read collapse commit message from file'),
            _(b'FILE'),
        ),
        (b'k', b'keep', False, _(b'keep original changesets')),
        (b'', b'keepbranches', False, _(b'keep original branch names')),
        (b'D', b'detach', False, _(b'(DEPRECATED)')),
        (b'i', b'interactive', False, _(b'(DEPRECATED)')),
        (b't', b'tool', b'', _(b'specify merge tool')),
        (b'', b'stop', False, _(b'stop interrupted rebase')),
        (b'c', b'continue', False, _(b'continue an interrupted rebase')),
        (b'a', b'abort', False, _(b'abort an interrupted rebase')),
        (
            b'',
            b'auto-orphans',
            b'',
            _(
                b'automatically rebase orphan revisions '
                b'in the specified revset (EXPERIMENTAL)'
            ),
        ),
    ]
    + cmdutil.dryrunopts
    + cmdutil.formatteropts
    + cmdutil.confirmopts,
    _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
891 def rebase(ui, repo, **opts):
891 def rebase(ui, repo, **opts):
892 """move changeset (and descendants) to a different branch
892 """move changeset (and descendants) to a different branch
893
893
894 Rebase uses repeated merging to graft changesets from one part of
894 Rebase uses repeated merging to graft changesets from one part of
895 history (the source) onto another (the destination). This can be
895 history (the source) onto another (the destination). This can be
896 useful for linearizing *local* changes relative to a master
896 useful for linearizing *local* changes relative to a master
897 development tree.
897 development tree.
898
898
899 Published commits cannot be rebased (see :hg:`help phases`).
899 Published commits cannot be rebased (see :hg:`help phases`).
900 To copy commits, see :hg:`help graft`.
900 To copy commits, see :hg:`help graft`.
901
901
902 If you don't specify a destination changeset (``-d/--dest``), rebase
902 If you don't specify a destination changeset (``-d/--dest``), rebase
903 will use the same logic as :hg:`merge` to pick a destination. if
903 will use the same logic as :hg:`merge` to pick a destination. if
904 the current branch contains exactly one other head, the other head
904 the current branch contains exactly one other head, the other head
905 is merged with by default. Otherwise, an explicit revision with
905 is merged with by default. Otherwise, an explicit revision with
906 which to merge with must be provided. (destination changeset is not
906 which to merge with must be provided. (destination changeset is not
907 modified by rebasing, but new changesets are added as its
907 modified by rebasing, but new changesets are added as its
908 descendants.)
908 descendants.)
909
909
910 Here are the ways to select changesets:
910 Here are the ways to select changesets:
911
911
912 1. Explicitly select them using ``--rev``.
912 1. Explicitly select them using ``--rev``.
913
913
914 2. Use ``--source`` to select a root changeset and include all of its
914 2. Use ``--source`` to select a root changeset and include all of its
915 descendants.
915 descendants.
916
916
917 3. Use ``--base`` to select a changeset; rebase will find ancestors
917 3. Use ``--base`` to select a changeset; rebase will find ancestors
918 and their descendants which are not also ancestors of the destination.
918 and their descendants which are not also ancestors of the destination.
919
919
920 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
920 4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
921 rebase will use ``--base .`` as above.
921 rebase will use ``--base .`` as above.
922
922
923 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
923 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
924 can be used in ``--dest``. Destination would be calculated per source
924 can be used in ``--dest``. Destination would be calculated per source
925 revision with ``SRC`` substituted by that single source revision and
925 revision with ``SRC`` substituted by that single source revision and
926 ``ALLSRC`` substituted by all source revisions.
926 ``ALLSRC`` substituted by all source revisions.

    Rebase will destroy the original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify the
    source and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``; see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted; there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c, aborted with --abort/-a, or stopped with
    --stop.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      - stabilize orphaned changesets so history looks linear::

          hg rebase -r 'orphan()-obsolete()'\
 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'

      Configuration Options:

      You can make rebase require a destination if you set the following config
      option::

        [commands]
        rebase.requiredest = True

      By default, rebase will close the transaction after each commit. For
      performance purposes, you can configure rebase to use a single transaction
      across the entire rebase. WARNING: This setting introduces a significant
      risk of losing the work you've done in a rebase if the rebase aborts
      unexpectedly::

        [rebase]
        singletransaction = True

      By default, rebase writes to the working copy, but you can configure it to
      run in-memory for better performance. When the rebase is not moving the
      parent(s) of the working copy (AKA the "currently checked out changesets"),
      this may also allow it to run even if the working copy is dirty::

        [rebase]
        experimental.inmemory = True

      Return Values:

      Returns 0 on success, 1 if nothing to rebase or there are
      unresolved conflicts.

    """
    opts = pycompat.byteskwargs(opts)
    inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
    action = cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
    if action:
        cmdutil.check_incompatible_arguments(
            opts, action, [b'confirm', b'dry_run']
        )
        cmdutil.check_incompatible_arguments(
            opts, action, [b'rev', b'source', b'base', b'dest']
        )
    cmdutil.check_at_most_one_arg(opts, b'confirm', b'dry_run')
    cmdutil.check_at_most_one_arg(opts, b'rev', b'source', b'base')

    if action or repo.currenttransaction() is not None:
        # in-memory rebase is not compatible with resuming rebases.
        # (Or if it is run within a transaction, since the restart logic can
        # fail the entire transaction.)
        inmemory = False

    if opts.get(b'auto_orphans'):
        disallowed_opts = set(opts) - {b'auto_orphans'}
        cmdutil.check_incompatible_arguments(
            opts, b'auto_orphans', disallowed_opts
        )

        userrevs = list(repo.revs(opts.get(b'auto_orphans')))
        opts[b'rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
        opts[b'dest'] = b'_destautoorphanrebase(SRC)'
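        # Sketch of the expansion (hypothetical invocation): with
        # --auto-orphans '0::', the command behaves like
        # `hg rebase -r '0:: and orphan()' -d '_destautoorphanrebase(SRC)'`,
        # i.e. each orphan gets its destination recomputed per source rev.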

    if opts.get(b'dry_run') or opts.get(b'confirm'):
        return _dryrunrebase(ui, repo, action, opts)
    elif action == b'stop':
        rbsrt = rebaseruntime(repo, ui)
        with repo.wlock(), repo.lock():
            rbsrt.restorestatus()
            if rbsrt.collapsef:
                raise error.Abort(_(b"cannot stop in --collapse session"))
            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if not (rbsrt.keepf or allowunstable):
                raise error.Abort(
                    _(
                        b"cannot remove original changesets with"
                        b" unrebased descendants"
                    ),
                    hint=_(
                        b'either enable obsmarkers to allow unstable '
                        b'revisions or use --keep to keep original '
                        b'changesets'
                    ),
                )
            # update to the current working revision
            # to clear interrupted merge
            hg.updaterepo(repo, rbsrt.originalwd, overwrite=True)
            rbsrt._finishrebase()
            return 0
    elif inmemory:
        try:
            # in-memory merge doesn't support conflicts, so if we hit any, abort
            # and re-run as an on-disk merge.
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
        except error.InMemoryMergeConflictsError:
            ui.warn(
                _(
                    b'hit merge conflicts; re-running rebase without in-memory'
                    b' merge\n'
                )
            )
            # TODO: Make in-memory merge not use the on-disk merge state, so
            # we don't have to clean it here
            mergestatemod.mergestate.clean(repo)
            clearstatus(repo)
            clearcollapsemsg(repo)
            return _dorebase(ui, repo, action, opts, inmemory=False)
    else:
        return _dorebase(ui, repo, action, opts)


def _dryrunrebase(ui, repo, action, opts):
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get(b'confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                    inmemory=True,
                    leaveunfinished=True,
                )
        except error.InMemoryMergeConflictsError:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True,
                        backup=False,
                        suppwarns=True,
                        confirm=confirm,
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True,
                    backup=False,
                    suppwarns=True,
                    dryrun=opts.get(b'dry_run'),
                )


def _dorebase(ui, repo, action, opts, inmemory=False):
    rbsrt = rebaseruntime(repo, ui, inmemory, opts)
    return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory)


def _origrebase(
    ui, repo, action, opts, rbsrt, inmemory=False, leaveunfinished=False
):
    assert action != b'stop'
    with repo.wlock(), repo.lock():
        if opts.get(b'interactive'):
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(_(b'message can only be specified with collapse'))

        if action:
            if rbsrt.collapsef:
                raise error.Abort(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == b'abort' and opts.get(b'tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == b'continue':
                ms = mergestatemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(
                isabort=(action == b'abort')
            )
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get(b'_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                inmemory,
                opts.get(b'dest', None),
                opts.get(b'source', []),
                opts.get(b'base', []),
                opts.get(b'rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase()


def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
    """use revisions argument to define destmap {srcrev: destrev}"""
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase`; see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.Abort(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, srcf)
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        # `+ (%ld)` to work around `wdir()::` being empty
        rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
    else:
        base = scmutil.revrange(repo, basef or [b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be to abort with a "cannot find branching point"
            # error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    if nodemod.wdirrev in rebaseset:
        raise error.Abort(_(b'cannot rebase the working copy'))
    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap


def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state are collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    parents = set()
    source = min(state)
    for rev in state:
        if rev == source:
            continue
        for p in repo[rev].parents():
            if p.rev() not in state and p.rev() not in destancestors:
                parents.add(p.rev())
    if not parents:
        return nullrev
    if len(parents) == 1:
        return parents.pop()
    raise error.Abort(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(parents)))
    )


def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # FIXME: make empty commit check consistent with ``repo.commit``
    if wctx.nofilechanges() and not repo.ui.configbool(
        b'ui', b'allowemptycommit'
    ):
        return None

    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = None
    if b'branch' in extra:
        branch = extra[b'branch']

    memctx = wctx.tomemctx(
        commitmsg,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres


def commitnode(repo, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    dsguard = util.nullcontextmanager()
    if not repo.ui.configbool(b'rebase', b'singletransaction'):
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )

    repo.dirstate.setbranch(repo[newnode].branch())
    return newnode


def rebasenode(repo, rev, p1, p2, base, collapse, dest, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor"""
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.clean_update(p1ctx)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod.update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    wctx.setparents(p1ctx.node(), repo[p2].node())
    if collapse:
        copies.graftcopies(wctx, ctx, repo[dest])
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())
    return stats


def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when rebasing B+E to F and C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example involving a merge changeset: for "rebase -r C+G+H -d K",
    rebase will first move C to C1 and G to G1, and when it's checking H, the
    adjusted destinations will be [C1, G1].

         H       C1 G1
        /|       | /
       F G       |/
      K | |  ->  K
      | C D      |
      | |/       |
      | B        | ...
      |/         |/
      A          A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.
        A

          C'   After rebasing C, when considering B's destination, use C'
          |    instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result


def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revisions in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
1600 # Obsolete node with successors not in dest leads to divergence
1602 # Obsolete node with successors not in dest leads to divergence
1601 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1603 divergenceok = ui.configbool(b'experimental', b'evolution.allowdivergence')
1602 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1604 divergencebasecandidates = rebaseobsrevs - rebaseobsskipped
1603
1605
1604 if divergencebasecandidates and not divergenceok:
1606 if divergencebasecandidates and not divergenceok:
1605 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1607 divhashes = (bytes(repo[r]) for r in divergencebasecandidates)
1606 msg = _(b"this rebase will cause divergences from: %s")
1608 msg = _(b"this rebase will cause divergences from: %s")
1607 h = _(
1609 h = _(
1608 b"to force the rebase please set "
1610 b"to force the rebase please set "
1609 b"experimental.evolution.allowdivergence=True"
1611 b"experimental.evolution.allowdivergence=True"
1610 )
1612 )
1611 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1613 raise error.Abort(msg % (b",".join(divhashes),), hint=h)
1612
1614
1613
1615
1614 def successorrevs(unfi, rev):
1616 def successorrevs(unfi, rev):
1615 """yield revision numbers for successors of rev"""
1617 """yield revision numbers for successors of rev"""
1616 assert unfi.filtername is None
1618 assert unfi.filtername is None
1617 get_rev = unfi.changelog.index.get_rev
1619 get_rev = unfi.changelog.index.get_rev
1618 for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
1620 for s in obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()]):
1619 r = get_rev(s)
1621 r = get_rev(s)
1620 if r is not None:
1622 if r is not None:
1621 yield r
1623 yield r
1622
1624
1623
1625
1624 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1626 def defineparents(repo, rev, destmap, state, skipped, obsskipped):
1625 """Return new parents and optionally a merge base for rev being rebased
1627 """Return new parents and optionally a merge base for rev being rebased
1626
1628
1627 The destination specified by "dest" cannot always be used directly because
1629 The destination specified by "dest" cannot always be used directly because
1628 previously rebase result could affect destination. For example,
1630 previously rebase result could affect destination. For example,
1629
1631
1630 D E rebase -r C+D+E -d B
1632 D E rebase -r C+D+E -d B
1631 |/ C will be rebased to C'
1633 |/ C will be rebased to C'
1632 B C D's new destination will be C' instead of B
1634 B C D's new destination will be C' instead of B
1633 |/ E's new destination will be C' instead of B
1635 |/ E's new destination will be C' instead of B
1634 A
1636 A
1635
1637
1636 The new parents of a merge is slightly more complicated. See the comment
1638 The new parents of a merge is slightly more complicated. See the comment
1637 block below.
1639 block below.
1638 """
1640 """
1639 # use unfiltered changelog since successorrevs may return filtered nodes
1641 # use unfiltered changelog since successorrevs may return filtered nodes
1640 assert repo.filtername is None
1642 assert repo.filtername is None
1641 cl = repo.changelog
1643 cl = repo.changelog
1642 isancestor = cl.isancestorrev
1644 isancestor = cl.isancestorrev
1643
1645
1644 dest = destmap[rev]
1646 dest = destmap[rev]
1645 oldps = repo.changelog.parentrevs(rev) # old parents
1647 oldps = repo.changelog.parentrevs(rev) # old parents
1646 newps = [nullrev, nullrev] # new parents
1648 newps = [nullrev, nullrev] # new parents
1647 dests = adjustdest(repo, rev, destmap, state, skipped)
1649 dests = adjustdest(repo, rev, destmap, state, skipped)
1648 bases = list(oldps) # merge base candidates, initially just old parents
1650 bases = list(oldps) # merge base candidates, initially just old parents
1649
1651
1650 if all(r == nullrev for r in oldps[1:]):
1652 if all(r == nullrev for r in oldps[1:]):
1651 # For non-merge changeset, just move p to adjusted dest as requested.
1653 # For non-merge changeset, just move p to adjusted dest as requested.
1652 newps[0] = dests[0]
1654 newps[0] = dests[0]
1653 else:
1655 else:
1654 # For merge changeset, if we move p to dests[i] unconditionally, both
1656 # For merge changeset, if we move p to dests[i] unconditionally, both
1655 # parents may change and the end result looks like "the merge loses a
1657 # parents may change and the end result looks like "the merge loses a
1656 # parent", which is a surprise. This is a limit because "--dest" only
1658 # parent", which is a surprise. This is a limit because "--dest" only
1657 # accepts one dest per src.
1659 # accepts one dest per src.
1658 #
1660 #
1659 # Therefore, only move p with reasonable conditions (in this order):
1661 # Therefore, only move p with reasonable conditions (in this order):
1660 # 1. use dest, if dest is a descendent of (p or one of p's successors)
1662 # 1. use dest, if dest is a descendent of (p or one of p's successors)
1661 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1663 # 2. use p's rebased result, if p is rebased (state[p] > 0)
1662 #
1664 #
1663 # Comparing with adjustdest, the logic here does some additional work:
1665 # Comparing with adjustdest, the logic here does some additional work:
1664 # 1. decide which parents will not be moved towards dest
1666 # 1. decide which parents will not be moved towards dest
1665 # 2. if the above decision is "no", should a parent still be moved
1667 # 2. if the above decision is "no", should a parent still be moved
1666 # because it was rebased?
1668 # because it was rebased?
1667 #
1669 #
1668 # For example:
1670 # For example:
1669 #
1671 #
1670 # C # "rebase -r C -d D" is an error since none of the parents
1672 # C # "rebase -r C -d D" is an error since none of the parents
1671 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1673 # /| # can be moved. "rebase -r B+C -d D" will move C's parent
1672 # A B D # B (using rule "2."), since B will be rebased.
1674 # A B D # B (using rule "2."), since B will be rebased.
1673 #
1675 #
1674 # The loop tries to be not rely on the fact that a Mercurial node has
1676 # The loop tries to be not rely on the fact that a Mercurial node has
1675 # at most 2 parents.
1677 # at most 2 parents.
1676 for i, p in enumerate(oldps):
1678 for i, p in enumerate(oldps):
1677 np = p # new parent
1679 np = p # new parent
1678 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1680 if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
1679 np = dests[i]
1681 np = dests[i]
1680 elif p in state and state[p] > 0:
1682 elif p in state and state[p] > 0:
1681 np = state[p]
1683 np = state[p]
1682
1684
1683 # If one parent becomes an ancestor of the other, drop the ancestor
1685 # If one parent becomes an ancestor of the other, drop the ancestor
1684 for j, x in enumerate(newps[:i]):
1686 for j, x in enumerate(newps[:i]):
1685 if x == nullrev:
1687 if x == nullrev:
1686 continue
1688 continue
1687 if isancestor(np, x): # CASE-1
1689 if isancestor(np, x): # CASE-1
1688 np = nullrev
1690 np = nullrev
1689 elif isancestor(x, np): # CASE-2
1691 elif isancestor(x, np): # CASE-2
1690 newps[j] = np
1692 newps[j] = np
1691 np = nullrev
1693 np = nullrev
1692 # New parents forming an ancestor relationship does not
1694 # New parents forming an ancestor relationship does not
1693 # mean the old parents have a similar relationship. Do not
1695 # mean the old parents have a similar relationship. Do not
1694 # set bases[x] to nullrev.
1696 # set bases[x] to nullrev.
1695 bases[j], bases[i] = bases[i], bases[j]
1697 bases[j], bases[i] = bases[i], bases[j]
1696
1698
1697 newps[i] = np
1699 newps[i] = np
1698
1700
1699 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1701 # "rebasenode" updates to new p1, and the old p1 will be used as merge
1700 # base. If only p2 changes, merging using unchanged p1 as merge base is
1702 # base. If only p2 changes, merging using unchanged p1 as merge base is
1701 # suboptimal. Therefore swap parents to make the merge sane.
1703 # suboptimal. Therefore swap parents to make the merge sane.
1702 if newps[1] != nullrev and oldps[0] == newps[0]:
1704 if newps[1] != nullrev and oldps[0] == newps[0]:
1703 assert len(newps) == 2 and len(oldps) == 2
1705 assert len(newps) == 2 and len(oldps) == 2
1704 newps.reverse()
1706 newps.reverse()
1705 bases.reverse()
1707 bases.reverse()
1706
1708
1707 # No parent change might be an error because we fail to make rev a
1709 # No parent change might be an error because we fail to make rev a
1708 # descendent of requested dest. This can happen, for example:
1710 # descendent of requested dest. This can happen, for example:
1709 #
1711 #
1710 # C # rebase -r C -d D
1712 # C # rebase -r C -d D
1711 # /| # None of A and B will be changed to D and rebase fails.
1713 # /| # None of A and B will be changed to D and rebase fails.
1712 # A B D
1714 # A B D
1713 if set(newps) == set(oldps) and dest not in newps:
1715 if set(newps) == set(oldps) and dest not in newps:
1714 raise error.Abort(
1716 raise error.Abort(
1715 _(
1717 _(
1716 b'cannot rebase %d:%s without '
1718 b'cannot rebase %d:%s without '
1717 b'moving at least one of its parents'
1719 b'moving at least one of its parents'
1718 )
1720 )
1719 % (rev, repo[rev])
1721 % (rev, repo[rev])
1720 )
1722 )
1721
1723
1722 # Source should not be ancestor of dest. The check here guarantees it's
1724 # Source should not be ancestor of dest. The check here guarantees it's
1723 # impossible. With multi-dest, the initial check does not cover complex
1725 # impossible. With multi-dest, the initial check does not cover complex
1724 # cases since we don't have abstractions to dry-run rebase cheaply.
1726 # cases since we don't have abstractions to dry-run rebase cheaply.
1725 if any(p != nullrev and isancestor(rev, p) for p in newps):
1727 if any(p != nullrev and isancestor(rev, p) for p in newps):
1726 raise error.Abort(_(b'source is ancestor of destination'))
1728 raise error.Abort(_(b'source is ancestor of destination'))
1727
1729
1728 # Check if the merge will contain unwanted changes. That may happen if
1730 # Check if the merge will contain unwanted changes. That may happen if
1729 # there are multiple special (non-changelog ancestor) merge bases, which
1731 # there are multiple special (non-changelog ancestor) merge bases, which
1730 # cannot be handled well by the 3-way merge algorithm. For example:
1732 # cannot be handled well by the 3-way merge algorithm. For example:
1731 #
1733 #
1732 # F
1734 # F
1733 # /|
1735 # /|
1734 # D E # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
1736 # D E # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
1735 # | | # as merge base, the difference between D and F will include
1737 # | | # as merge base, the difference between D and F will include
1736 # B C # C, so the rebased F will contain C surprisingly. If "E" was
1738 # B C # C, so the rebased F will contain C surprisingly. If "E" was
1737 # |/ # chosen, the rebased F will contain B.
1739 # |/ # chosen, the rebased F will contain B.
1738 # A Z
1740 # A Z
1739 #
1741 #
1740 # But our merge base candidates (D and E in above case) could still be
1742 # But our merge base candidates (D and E in above case) could still be
1741 # better than the default (ancestor(F, Z) == null). Therefore still
1743 # better than the default (ancestor(F, Z) == null). Therefore still
1742 # pick one (so choose p1 above).
1744 # pick one (so choose p1 above).
1743 if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
1745 if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
1744 unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
1746 unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
1745 for i, base in enumerate(bases):
1747 for i, base in enumerate(bases):
1746 if base == nullrev or base in newps:
1748 if base == nullrev or base in newps:
1747 continue
1749 continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            other_bases = set(bases) - {base}
            siderevs = list(
                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        if any(revs is not None for revs in unwanted):
            # Choose a merge base that has a minimal number of unwanted revs.
            l, i = min(
                (len(revs), i)
                for i, revs in enumerate(unwanted)
                if revs is not None
            )

            # The merge will include unwanted revisions. Abort now. Revisit this if
            # we have a more advanced merge algorithm that handles multiple bases.
            if l > 0:
                unwanteddesc = _(b' or ').join(
                    (
                        b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                        for revs in unwanted
                        if revs is not None
                    )
                )
                raise error.Abort(
                    _(b'rebasing %d:%s will include unwanted changes from %s')
                    % (rev, repo[rev], unwanteddesc)
                )

            # newps[0] should match merge base if possible. Currently, if newps[i]
            # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
            # the other's ancestor. In that case, it's fine to not swap newps here.
            # (see CASE-1 and CASE-2 above)
            if i != 0:
                if newps[i] != nullrev:
                    newps[0], newps[i] = newps[i], newps[0]
                    bases[0], bases[i] = bases[i], bases[0]

    # "rebasenode" updates to new p1, use the corresponding merge base.
    base = bases[0]

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base


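# Illustrative sketch, not part of this module: the merge-base selection above
# leans on Python tuple comparison. min() over (len(revs), i) pairs compares
# the unwanted-revision counts first and uses the list index only as a
# deterministic tie-breaker. A hypothetical standalone restatement:
def _pick_minimal_base(unwanted):
    """Return (count, index) for the candidate with the fewest unwanted revs."""
    return min(
        (len(revs), i)
        for i, revs in enumerate(unwanted)
        if revs is not None
    )


# _pick_minimal_base([None, [5, 7], [6]]) == (1, 2), so bases[2] would win.

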
def isagitpatch(repo, patchname):
    """Return true if the given patch is in git format"""
    mqpatch = os.path.join(repo.mq.path, patchname)
    for line in patch.linereader(open(mqpatch, b'rb')):
        if line.startswith(b'diff --git'):
            return True
    return False


def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them"""
    mqrebase = {}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug(
                b'revision %d is an mq patch (%s), finalize it.\n'
                % (rev, p.name)
            )
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(
                    _(b'updating mq patch %s to %d:%s\n')
                    % (name, state[rev], repo[state[rev]])
                )
                mq.qimport(
                    repo,
                    (),
                    patchname=name,
                    git=isgit,
                    rev=[b"%d" % state[rev]],
                )
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied, rebased and imported in order;
        # applied and removed; or unapplied. Discard the removed ones
        # while preserving the original series order and guards.
        newseries = [
            s
            for s in original_series
            if mq.guard_re.split(s, 1)[0] not in skippedpatches
        ]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()


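# Illustrative sketch, not part of this module: mq series entries may carry a
# guard suffix (e.g. b'my-patch #+experimental'), and guard_re.split(s, 1)[0]
# above keeps only the bare patch name so skipped patches match regardless of
# guards. The regex below is a hypothetical stand-in for mq's actual guard_re.
import re

_demo_guard_re = re.compile(br'\s*#')


def _strip_guards(series_line):
    """Return the patch name with any trailing guard annotation removed."""
    return _demo_guard_re.split(series_line, 1)[0].rstrip()


# _strip_guards(b'my-patch #+experimental') == b'my-patch'

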
def storecollapsemsg(repo, collapsemsg):
    """Store the collapse message to allow recovery"""
    collapsemsg = collapsemsg or b''
    f = repo.vfs(b"last-message.txt", b"w")
    f.write(b"%s\n" % collapsemsg)
    f.close()


def clearcollapsemsg(repo):
    """Remove collapse message file"""
    repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)


def restorecollapsemsg(repo, isabort):
    """Restore previously stored collapse message"""
    try:
        f = repo.vfs(b"last-message.txt")
        collapsemsg = f.readline().strip()
        f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = b''
        else:
            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
    return collapsemsg


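# Illustrative sketch, not part of this module: restorecollapsemsg uses the
# common "tolerate a missing file, re-raise anything else" idiom. A generic
# helper built on the same assumption (errno is imported at the top of this
# module) might look like:
def _read_if_exists(open_fn, name, default=b''):
    """Read a file's first line, returning `default` if the file is absent."""
    try:
        f = open_fn(name)
        try:
            return f.readline().strip()
        finally:
            f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        return default

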
def clearstatus(repo):
    """Remove the status files"""
    # Make sure the active transaction won't write the state file
    tr = repo.currenttransaction()
    if tr:
        tr.removefilegenerator(b'rebasestate')
    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)


def sortsource(destmap):
    """yield source revisions in an order such that each revision is only
    rebased once

    If source and destination overlap, we should filter out revisions
    depending on other revisions which haven't been rebased yet.

    Yield a sorted list of revisions each time.

    For example, when rebasing A onto B, and B onto C, this function yields
    [B], then [A], indicating B needs to be rebased first.

    Raise if there is a cycle so the rebase is impossible.
    """
    srcset = set(destmap)
    while srcset:
        srclist = sorted(srcset)
        result = []
        for r in srclist:
            if destmap[r] not in srcset:
                result.append(r)
        if not result:
            raise error.Abort(_(b'source and destination form a cycle'))
        srcset -= set(result)
        yield result


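# Illustrative sketch, not part of this module: a tiny self-contained
# restatement of the batching performed by sortsource() on a toy destmap
# (revision numbers are hypothetical).
def _demo_sortsource():
    destmap = {10: 20, 20: 30}  # rebase 10 onto 20, and 20 onto 30
    batches = []
    srcset = set(destmap)
    while srcset:
        batch = sorted(r for r in srcset if destmap[r] not in srcset)
        if not batch:
            raise RuntimeError('source and destination form a cycle')
        srcset -= set(batch)
        batches.append(batch)
    return batches  # [[20], [10]]: 20 must be rebased before 10

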
def buildstate(repo, destmap, collapse):
    '''Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}
    '''
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
        if set(destmap.values()) & mqapplied:
            raise error.Abort(_(b'cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
    if not sortedsrc:
        raise error.Abort(_(b'no matching revisions'))

    # Only check the first batch of revisions to rebase, i.e. those not
    # depending on the rest of the rebaseset. This means the "source is
    # ancestor of destination" check is not performed here for the second
    # (and following) batches of revisions. We rely on "defineparents" to
    # do that check.
    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.Abort(_(b'no matching revisions'))

    def revof(r):
        return r.rev()

    roots = sorted(roots, key=revof)
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = len(sortedsrc) == 1
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.Abort(_(b'source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revision
                state[root.rev()] = root.rev()
                repo.ui.debug(b'source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state


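# Illustrative sketch, not part of this module: the final loop in buildstate
# marks a revision done (state[rev] == rev) once every non-null parent is
# done, keeping fully settled subgraphs out of the remaining work. Restated
# on a toy parent map (revision numbers are hypothetical):
def _propagate_done(state, parentmap):
    """Mark revs done when all their recorded parents are done."""
    for rev in sorted(state):
        parents = parentmap.get(rev, [])
        if parents and all(state.get(p) == p for p in parents):
            state[rev] = rev
    return state


# _propagate_done({2: 2, 3: -1}, {3: [2]}) also marks 3 as done, while
# _propagate_done({2: -1, 3: -1}, {2: [1], 3: [2]}) leaves both pending.

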
def clearrebased(
    ui,
    repo,
    destmap,
    state,
    skipped,
    collapsedas=None,
    keepf=False,
    fm=None,
    backup=True,
):
    """dispose of rebased revisions at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is
    the `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).

    If `backup` is False, no backup will be stored when stripping rebased
    revisions.
    """
    tonode = repo.changelog.node
    replacements = {}
    moves = {}
    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)

    collapsednodes = []
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            oldnode = tonode(rev)
            newnode = collapsedas or tonode(newrev)
            moves[oldnode] = newnode
            succs = None
            if rev in skipped:
                if stripcleanup or not repo[rev].obsolete():
                    succs = ()
            elif collapsedas:
                collapsednodes.append(oldnode)
            else:
                succs = (newnode,)
            if succs is not None:
                replacements[(oldnode,)] = succs
    if collapsednodes:
        replacements[tuple(collapsednodes)] = (collapsedas,)
    if fm:
        hf = fm.hexfunc
        fl = fm.formatlist
        fd = fm.formatdict
        changes = {}
        for oldns, newn in pycompat.iteritems(replacements):
            for oldn in oldns:
                changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
        nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
        fm.data(nodechanges=nodechanges)
    if keepf:
        replacements = {}
    scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)


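# Illustrative sketch, not part of this module: `replacements` in clearrebased
# maps tuples of old nodes to tuples of successors. A collapse folds several
# old nodes into a single key so they share one successor. The byte strings
# below are hypothetical stand-ins for real changeset hashes.
def _demo_replacements():
    collapsedas = b'new1'
    collapsednodes = [b'old1', b'old2']
    replacements = {}
    # a skipped revision gets an empty successor tuple (it is pruned)
    replacements[(b'old0',)] = ()
    # collapsed revisions share one successor
    replacements[tuple(collapsednodes)] = (collapsedas,)
    return replacements  # {(b'old0',): (), (b'old1', b'old2'): (b'new1',)}

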
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase"""
    if opts.get('rebase'):
        if ui.configbool(b'commands', b'rebase.requiredest'):
            msg = _(b'rebase destination required by configuration')
            hint = _(b'use hg pull followed by hg rebase -d DEST')
            raise error.Abort(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get('update'):
                del opts['update']
                ui.debug(
                    b'--update and --rebase are not compatible, ignoring '
                    b'the update flag\n'
                )

            cmdutil.checkunfinished(repo, skipmerge=True)
            cmdutil.bailifchanged(
                repo,
                hint=_(
                    b'cannot pull with rebase: '
                    b'please commit or shelve your changes first'
                ),
            )

            revsprepull = len(repo)
            origpostincoming = commands.postincoming

            def _dummy(*args, **kwargs):
                pass

            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # the --rev option of pull conflicts with rebase's own --rev,
                # so drop it
                if 'rev' in opts:
                    del opts['rev']
                # the positional argument of pull conflicts with rebase's own
                # --source, so drop it as well
                if 'source' in opts:
                    del opts['source']
                # revsprepull is the length of the repo, not the revnum of tip
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts['_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo[b'.'].rev():
                        ui.status(_(b'nothing to rebase\n'))
                    else:
                        ui.status(_(b'nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get('tool'):
            raise error.Abort(_(b'--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret


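# Illustrative sketch, not part of this module: pullrebase temporarily
# replaces commands.postincoming and restores it in a `finally` block, so the
# original is reinstated even when the wrapped call raises. The same pattern,
# generalized over a hypothetical module attribute:
def _with_silenced(module, attrname, fn, *args, **kwargs):
    """Call fn with module.<attrname> replaced by a no-op, then restore it."""
    saved = getattr(module, attrname)
    setattr(module, attrname, lambda *a, **kw: None)
    try:
        return fn(*args, **kwargs)
    finally:
        setattr(module, attrname, saved)

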
def _filterobsoleterevs(repo, revs):
    """returns a set of the obsolete revisions in revs"""
    return {r for r in revs if repo[r].obsolete()}


def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
    """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination,
    obsoleteextinctsuccessors).

    `obsoletenotrebased` is a mapping from obsolete revision to successor for
    all obsolete nodes to be rebased given in `rebaseobsrevs`.

    `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
    without a successor in destination.

    `obsoleteextinctsuccessors` is a set of obsolete revisions with only
    obsolete successors.
    """
    obsoletenotrebased = {}
    obsoletewithoutsuccessorindestination = set()
    obsoleteextinctsuccessors = set()

    assert repo.filtername is None
    cl = repo.changelog
    get_rev = cl.index.get_rev
    extinctrevs = set(repo.revs(b'extinct()'))
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        # XXX: more advanced APIs are required to handle split correctly
        successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        # obsutil.allsuccessors includes node itself
        successors.remove(srcnode)
        succrevs = {get_rev(s) for s in successors}
        succrevs.discard(None)
        if succrevs.issubset(extinctrevs):
            # all successors are extinct
            obsoleteextinctsuccessors.add(srcrev)
        if not successors:
            # no successor
            obsoletenotrebased[srcrev] = None
        else:
            dstrev = destmap[srcrev]
            for succrev in succrevs:
                if cl.isancestorrev(succrev, dstrev):
                    obsoletenotrebased[srcrev] = succrev
                    break
            else:
                # If 'srcrev' has a successor in the rebase set but none in
                # the destination (which would be caught above), we shall
                # skip it and its descendants to avoid divergence.
                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                    obsoletewithoutsuccessorindestination.add(srcrev)

    return (
        obsoletenotrebased,
        obsoletewithoutsuccessorindestination,
        obsoleteextinctsuccessors,
    )


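# Illustrative sketch, not part of this module: the for/else in
# _computeobsoletenotrebased relies on Python's else-on-loop clause, which
# runs only when the loop finished without `break`. A minimal restatement
# with hypothetical arguments:
def _first_in_destination(succrevs, is_ancestor, dstrev):
    """Return the first successor that is an ancestor of dstrev, else None."""
    for succrev in succrevs:
        if is_ancestor(succrev, dstrev):
            return succrev  # plays the role of the `break`
    return None  # plays the role of the `else:` branch

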
def abortrebase(ui, repo):
    with repo.wlock(), repo.lock():
        rbsrt = rebaseruntime(repo, ui)
        rbsrt._prepareabortorcontinue(isabort=True)


def continuerebase(ui, repo):
    with repo.wlock(), repo.lock():
        rbsrt = rebaseruntime(repo, ui)
        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)
        retcode = rbsrt._prepareabortorcontinue(isabort=False)
        if retcode is not None:
            return retcode
        rbsrt._performrebase(None)
        rbsrt._finishrebase()


def summaryhook(ui, repo):
    if not repo.vfs.exists(b'rebasestate'):
        return
    try:
        rbsrt = rebaseruntime(repo, ui, {})
        rbsrt.restorestatus()
        state = rbsrt.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        msg = _(b'rebase: (use "hg rebase --abort" to clear broken state)\n')
        ui.write(msg)
        return
    numrebased = len([i for i in pycompat.itervalues(state) if i >= 0])
    # i18n: column positioning for "hg summary"
    ui.write(
        _(b'rebase: %s, %s (rebase --continue)\n')
        % (
            ui.label(_(b'%d rebased'), b'rebase.rebased') % numrebased,
            ui.label(_(b'%d remaining'), b'rebase.remaining')
            % (len(state) - numrebased),
        )
    )


def uisetup(ui):
    # Replace pull with a decorator to provide --rebase option
    entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
    entry[1].append(
        (b'', b'rebase', None, _(b"rebase working directory to branch head"))
    )
    entry[1].append((b't', b'tool', b'', _(b"specify merge tool for rebase")))
    cmdutil.summaryhooks.add(b'rebase', summaryhook)
    statemod.addunfinished(
        b'rebase',
        fname=b'rebasestate',
        stopflag=True,
        continueflag=True,
        abortfunc=abortrebase,
        continuefunc=continuerebase,
    )
@@ -1,3089 +1,3089 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    mergestate as mergestatemod,
    metadata,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache


class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

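    # Illustrative note, not part of this class: the "load earliest manifest
    # first" ordering in _buildstatus is a delta-cache argument. Assuming rev
    # 1001's manifest is stored as a delta against rev 1000's, reading 1000
    # first caches its full reconstruction, so reading 1001 costs one delta
    # application; in the opposite order, reading 1000 afterwards pays for a
    # second full reconstruction.
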
    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

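    # Illustrative note, not part of this class: _fileinfo is a tiered
    # lookup, cheapest source first -- (1) a fully parsed manifest already
    # cached on the object, (2) the manifest delta when the path is known to
    # have changed in this changeset, (3) the manifest log, reading from
    # storage as a last resort. Each tier either answers or falls through.
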
    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

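    # Illustrative note, not part of this class: the `reversed` handling in
    # status() is a pure relabeling. Swapping the comparison direction swaps
    # the meaning of "added" and "removed", while "deleted", "unknown" and
    # "ignored" only make sense relative to the working directory, so they
    # are cleared rather than swapped:
    #
    #   forward:  status(old, new) -> added=A, removed=R
    #   reversed: status(new, old) -> added=R, removed=A, wd-only lists = []
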
478 def mergestate(self, clean=False):
478 def mergestate(self, clean=False):
479 """Get a mergestate object for this context."""
479 """Get a mergestate object for this context."""
480 raise NotImplementedError(
480 raise NotImplementedError(
481 '%s does not implement mergestate()' % self.__class__
481 '%s does not implement mergestate()' % self.__class__
482 )
482 )
483
483
484
484
485 class changectx(basectx):
485 class changectx(basectx):
486 """A changecontext object makes access to data related to a particular
486 """A changecontext object makes access to data related to a particular
487 changeset convenient. It represents a read-only context already present in
487 changeset convenient. It represents a read-only context already present in
488 the repo."""
488 the repo."""
489
489
490 def __init__(self, repo, rev, node, maybe_filtered=True):
490 def __init__(self, repo, rev, node, maybe_filtered=True):
491 super(changectx, self).__init__(repo)
491 super(changectx, self).__init__(repo)
492 self._rev = rev
492 self._rev = rev
493 self._node = node
493 self._node = node
494 # When maybe_filtered is True, the revision might be affected by
494 # When maybe_filtered is True, the revision might be affected by
495 # changelog filtering and operation through the filtered changelog must be used.
495 # changelog filtering and operation through the filtered changelog must be used.
496 #
496 #
497 # When maybe_filtered is False, the revision has already been checked
497 # When maybe_filtered is False, the revision has already been checked
498 # against filtering and is not filtered. Operation through the
498 # against filtering and is not filtered. Operation through the
499 # unfiltered changelog might be used in some case.
499 # unfiltered changelog might be used in some case.
500 self._maybe_filtered = maybe_filtered
500 self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]
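    # Editorial note on _parents above (our inference, not upstream text):
    # the parent contexts can safely be built with maybe_filtered=False
    # because changelog filtering never hides an ancestor of a reachable
    # revision, so parents looked up via parentrevs() need no further
    # filtering checks.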

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = metadata.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved
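    # Editorial summary of filesadded()/filesremoved() above (derived from
    # the code, not normative documentation):
    #
    #   storage mode / copies.read-from    changelog value used?   if absent
    #   changeset-sidedata                 yes                     empty list
    #   changeset-only                     yes                     empty list
    #   compatibility                      yes                     recomputed
    #   any other value (filelog mode)     no                      recomputed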

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config says to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
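    # Illustrative shape of the values computed above: each mapping goes from
    # a file's name in this changeset to its copy source in the corresponding
    # parent, e.g. p1copies == {b'renamed/name': b'original/name'}.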

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
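    # Illustrative use of ancestor() with hypothetical revisions: pick the
    # merge base of the working directory parent and another head, emitting
    # the note about merge.preferancestor when several candidates exist:
    #
    #   base = repo[b'.'].ancestor(repo[b'other-head'], warn=True)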

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This
        is expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() == fctx.size():
            # size() matches: need to compare content
            return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
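    # Editorial note on the "size() - 4" test in cmp() above: revlog file
    # data that itself starts with b'\x01\n' is stored with an empty metadata
    # block (b'\x01\n\x01\n', 4 bytes) prepended, so the filelog size can
    # exceed the working-directory size by exactly 4 even when the contents
    # match, forcing the full content comparison.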

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces) we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr
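    # Linkrev aliasing in brief (editorial illustration): when identical file
    # content with identical history is introduced on two branches, the
    # filelog stores a single revision whose linkrev names only one of the
    # introducing changesets. If that changeset is not an ancestor of the
    # context we started from, the walk above recovers the introduction that
    # actually is.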

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision"""
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
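    # Illustrative contrast (hypothetical revisions): linkrev() may name a
    # changeset on an unrelated branch that first introduced this file
    # content, whereas introrev() is guaranteed to name an ancestor of the
    # changeset this filectx was created from.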

    def _introrev(self, stoprev=None):
        """
        Same as `introrev`, but with an extra argument to limit the changelog
        iteration range in some internal use cases.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and it
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at index 0 will always result in
            # replacing the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            # it is safe to use an unfiltered repository here because we are
            # walking ancestors only.
            cl = self._repo.unfiltered().changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )
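    # Illustrative use of annotate() with a hypothetical path: produce a
    # blame-style listing of revision and line content.
    #
    #   fctx = repo[b'tip'][b'path/to/file']
    #   for line in fctx.annotate(follow=True):
    #       print(line.fctx.rev(), line.text)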

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())


class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` objects obtained from `filectx` are not used in
            # complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered once solutions to the linkrev issues are on the
            # table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )
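    # Illustrative behavior of data() above: with censor.policy=ignore set
    # (e.g. `hg cat --config censor.policy=ignore ...`), a censored file
    # revision reads as b"" instead of aborting.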

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for the
        changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different file
        revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]


class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False


1506 class workingctx(committablectx):
1506 class workingctx(committablectx):
1507 """A workingctx object makes access to data related to
1507 """A workingctx object makes access to data related to
1508 the current working directory convenient.
1508 the current working directory convenient.
1509 date - any valid date string or (unixtime, offset), or None.
1509 date - any valid date string or (unixtime, offset), or None.
1510 user - username string, or None.
1510 user - username string, or None.
1511 extra - a dictionary of extra values, or None.
1511 extra - a dictionary of extra values, or None.
1512 changes - a list of file lists as returned by localrepo.status()
1512 changes - a list of file lists as returned by localrepo.status()
1513 or None to use the repository status.
1513 or None to use the repository status.
1514 """
1514 """
1515
1515
1516 def __init__(
1516 def __init__(
1517 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1517 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1518 ):
1518 ):
1519 branch = None
1519 branch = None
1520 if not extra or b'branch' not in extra:
1520 if not extra or b'branch' not in extra:
1521 try:
1521 try:
1522 branch = repo.dirstate.branch()
1522 branch = repo.dirstate.branch()
1523 except UnicodeDecodeError:
1523 except UnicodeDecodeError:
1524 raise error.Abort(_(b'branch name not in UTF-8!'))
1524 raise error.Abort(_(b'branch name not in UTF-8!'))
1525 super(workingctx, self).__init__(
1525 super(workingctx, self).__init__(
1526 repo, text, user, date, extra, changes, branch=branch
1526 repo, text, user, date, extra, changes, branch=branch
1527 )
1527 )
1528
1528
1529 def __iter__(self):
1529 def __iter__(self):
1530 d = self._repo.dirstate
1530 d = self._repo.dirstate
1531 for f in d:
1531 for f in d:
1532 if d[f] != b'r':
1532 if d[f] != b'r':
1533 yield f
1533 yield f
1534
1534
1535 def __contains__(self, key):
1535 def __contains__(self, key):
1536 return self._repo.dirstate[key] not in b"?r"
1536 return self._repo.dirstate[key] not in b"?r"
1537
1537
1538 def hex(self):
1538 def hex(self):
1539 return wdirhex
1539 return wdirhex
1540
1540
1541 @propertycache
1541 @propertycache
1542 def _parents(self):
1542 def _parents(self):
1543 p = self._repo.dirstate.parents()
1543 p = self._repo.dirstate.parents()
1544 if p[1] == nullid:
1544 if p[1] == nullid:
1545 p = p[:-1]
1545 p = p[:-1]
1546 # use unfiltered repo to delay/avoid loading obsmarkers
1546 # use unfiltered repo to delay/avoid loading obsmarkers
1547 unfi = self._repo.unfiltered()
1547 unfi = self._repo.unfiltered()
1548 return [
1548 return [
1549 changectx(
1549 changectx(
1550 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1550 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1551 )
1551 )
1552 for n in p
1552 for n in p
1553 ]
1553 ]
1554
1554
1555 def setparents(self, p1node, p2node=nullid):
1555 def setparents(self, p1node, p2node=nullid):
1556 dirstate = self._repo.dirstate
1556 dirstate = self._repo.dirstate
1557 with dirstate.parentchange():
1557 with dirstate.parentchange():
1558 copies = dirstate.setparents(p1node, p2node)
1558 copies = dirstate.setparents(p1node, p2node)
1559 pctx = self._repo[p1node]
1559 pctx = self._repo[p1node]
1560 if copies:
1560 if copies:
1561 # Adjust copy records, the dirstate cannot do it, it
1561 # Adjust copy records, the dirstate cannot do it, it
1562 # requires access to parents manifests. Preserve them
1562 # requires access to parents manifests. Preserve them
1563 # only for entries added to first parent.
1563 # only for entries added to first parent.
1564 for f in copies:
1564 for f in copies:
1565 if f not in pctx and copies[f] in pctx:
1565 if f not in pctx and copies[f] in pctx:
1566 dirstate.copy(copies[f], f)
1566 dirstate.copy(copies[f], f)
1567 if p2node == nullid:
1567 if p2node == nullid:
1568 for f, s in sorted(dirstate.copies().items()):
1568 for f, s in sorted(dirstate.copies().items()):
1569 if f not in pctx and s not in pctx:
1569 if f not in pctx and s not in pctx:
1570 dirstate.copy(None, f)
1570 dirstate.copy(None, f)
1571
1571
1572 def _fileinfo(self, path):
1572 def _fileinfo(self, path):
1573 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1573 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1574 self._manifest
1574 self._manifest
1575 return super(workingctx, self)._fileinfo(path)
1575 return super(workingctx, self)._fileinfo(path)
1576
1576
1577 def _buildflagfunc(self):
1577 def _buildflagfunc(self):
1578 # Create a fallback function for getting file flags when the
1578 # Create a fallback function for getting file flags when the
1579 # filesystem doesn't support them
1579 # filesystem doesn't support them
1580
1580
1581 copiesget = self._repo.dirstate.copies().get
1581 copiesget = self._repo.dirstate.copies().get
1582 parents = self.parents()
1582 parents = self.parents()
1583 if len(parents) < 2:
1583 if len(parents) < 2:
1584 # when we have one parent, it's easy: copy from parent
1584 # when we have one parent, it's easy: copy from parent
1585 man = parents[0].manifest()
1585 man = parents[0].manifest()
1586
1586
1587 def func(f):
1587 def func(f):
1588 f = copiesget(f, f)
1588 f = copiesget(f, f)
1589 return man.flags(f)
1589 return man.flags(f)
1590
1590
1591 else:
1591 else:
1592 # merges are tricky: we try to reconstruct the unstored
1592 # merges are tricky: we try to reconstruct the unstored
1593 # result from the merge (issue1802)
1593 # result from the merge (issue1802)
1594 p1, p2 = parents
1594 p1, p2 = parents
1595 pa = p1.ancestor(p2)
1595 pa = p1.ancestor(p2)
1596 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1596 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1597
1597
1598 def func(f):
1598 def func(f):
1599 f = copiesget(f, f) # may be wrong for merges with copies
1599 f = copiesget(f, f) # may be wrong for merges with copies
1600 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1600 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1601 if fl1 == fl2:
1601 if fl1 == fl2:
1602 return fl1
1602 return fl1
1603 if fl1 == fla:
1603 if fl1 == fla:
1604 return fl2
1604 return fl2
1605 if fl2 == fla:
1605 if fl2 == fla:
1606 return fl1
1606 return fl1
1607 return b'' # punt for conflicts
1607 return b'' # punt for conflicts
1608
1608
1609 return func
1609 return func
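
    # Editor's sketch (not part of the original source): the three-way flag
    # merge above keeps whatever both parents agree on, otherwise takes the
    # side that diverged from the ancestor, and punts on real conflicts:
    #
    #   fl1    fl2    fla    -> result
    #   b'x'   b'x'   b''       b'x'  (both agree)
    #   b'x'   b''    b''       b'x'  (p1 changed, p2 matches ancestor)
    #   b''    b'l'   b''       b'l'  (p2 changed, p1 matches ancestor)
    #   b'x'   b'l'   b''       b''   (both changed differently: punt)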

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    def flags(self, path):
        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )

    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified"""
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )

    def add(self, list, prefix=b""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
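
    # Editor's note (illustrative, not in the original source): the RAM
    # warning above is a rough 3x-of-file-size heuristic reported in MB;
    # e.g. for a 50,000,000 byte file, 3 * 50000000 // 1000000 == 150 MB.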

    def forget(self, files, prefix=b""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != b'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    ds.add(dest)
                elif ds[dest] in b'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        r = self._repo
        if not cwd:
            cwd = r.getcwd()

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == b'l':
                d = self[f].data()
                if (
                    d == b''
                    or len(d) >= 1024
                    or b'\n' in d
                    or stringutil.binary(d)
                ):
                    self._repo.ui.debug(
                        b'ignoring suspect symlink placeholder "%s"\n' % f
                    )
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
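
    # Editor's sketch (not part of the original source): every "possibly
    # clean" file handed in by the dirstate lands in exactly one bucket --
    #
    #   content or flags differ from p1    -> modified
    #   stat/read raised IOError/OSError   -> deleted
    #   identical to p1 after full compare -> fixup (the dirstate can then
    #                                         re-mark it clean and skip the
    #                                         compare on future status calls)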

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        changedset = set(self.added()) | set(self.modified())
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if dst not in changedset or not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
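
    # Editor's sketch (not part of the original source): a dirstate copy
    # record dst <- src is attributed to whichever parent actually has the
    # source; e.g. with dirstate.copies() == {b'b.txt': b'a.txt'}, b'b.txt'
    # added, and b'a.txt' present only in p1's manifest, the result is
    # ({b'b.txt': b'a.txt'}, {}).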

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but we use special node
        identifiers for added and modified files. This is used by manifest
        merging to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in (
            (addednodeid, status.added),
            (modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man
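
    # Editor's sketch (not part of the original source): the result is p1's
    # manifest with sentinel nodeids patched in, roughly --
    #
    #   {b'kept.txt': <real nodeid>,     # unchanged: nodeid kept from p1
    #    b'new.txt': addednodeid,        # in status.added
    #    b'edited.txt': modifiednodeid}  # in status.modified
    #
    # while entries in status.removed/status.deleted are dropped entirely.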

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != b'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

    def mergestate(self, clean=False):
        if clean:
            return mergestatemod.mergestate.clean(self._repo)
        return mergestatemod.mergestate.read(self._repo)


class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        return []


class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)


class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If ``exists`` is True, ``flags`` must be non-None and ``date`` is
    non-None. If it is False, the file was deleted.
    """
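
    # Editor's sketch (not part of the original source): a cache entry for
    # a file written in-memory might look roughly like
    #
    #   self._cache[b'foo.txt'] == {
    #       b'exists': True,
    #       b'date': dateutil.makedate(),
    #       b'data': b'new contents\n',
    #       b'flags': b'',
    #       b'copied': None,
    #   }
    #
    # while a deletion is an entry with b'exists': False and no usable data.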

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=nullid):
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]
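
    # Editor's sketch (not part of the original source): modified/added/
    # removed fall out of a two-bit truth table over the dirty cache --
    #
    #   cache says exists?   exists in parent?   -> bucket
    #   True                 True                   modified
    #   True                 False                  added
    #   False                True                   removed
    #
    # (False/False would be a deletion of a file the parent never had, so
    # it lands in no bucket.)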

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )
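
    # Editor's sketch (not part of the original source): both directions of
    # the audit matter. Writing b'a/foo' aborts if b'a' is a file (or
    # symlink) in p1, and writing b'a' aborts if p1 tracks anything under
    # b'a/' that this in-memory context has not already deleted.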

    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)
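
    # Editor's sketch (not part of the original source): given a dirty cache
    # entry {b'exists': True, b'flags': b'l', b'data': b'target'}, calling
    # exists(b'link') recurses into exists(b'target'), so a symlink whose
    # target was removed in-memory correctly reports False (a broken link).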

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )
2507
2507
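A hedged usage sketch of the conversion above: ``repo``, ``wctx`` (an in-memory ``overlayworkingctx``) and ``dest`` are assumed to be provided by the caller. Per the docstring, ``parents`` are rev numbers, and the resulting ``memctx`` can be committed with ``repo.commitctx()``; this is roughly how an in-memory rebase would finalize a node.

# Sketch only: repo, wctx and dest are assumed to exist.
mctx = wctx.tomemctx(
    b'commit message',
    parents=(dest.rev(), None),   # rev numbers; second parent omitted
    user=b'author <author@example.com>',
)
newnode = repo.commitctx(mctx)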
2508 def tomemctx_for_amend(self, precursor):
2508 def tomemctx_for_amend(self, precursor):
2509 extra = precursor.extra().copy()
2509 extra = precursor.extra().copy()
2510 extra[b'amend_source'] = precursor.hex()
2510 extra[b'amend_source'] = precursor.hex()
2511 return self.tomemctx(
2511 return self.tomemctx(
2512 text=precursor.description(),
2512 text=precursor.description(),
2513 branch=precursor.branch(),
2513 branch=precursor.branch(),
2514 extra=extra,
2514 extra=extra,
2515 date=precursor.date(),
2515 date=precursor.date(),
2516 user=precursor.user(),
2516 user=precursor.user(),
2517 )
2517 )
2518
2518
2519 def isdirty(self, path):
2519 def isdirty(self, path):
2520 return path in self._cache
2520 return path in self._cache
2521
2521
2522 def isempty(self):
2522 def nofilechanges(self):
2523 # We need to discard any keys that are actually clean before the empty
2523 # We need to discard any keys that are actually clean before the empty
2524 # commit check.
2524 # commit check.
2525 self._compact()
2525 self._compact()
2526 return len(self._cache) == 0
2526 return len(self._cache) == 0
2527
2527
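The rename from ``isempty()`` visible in the diff above makes the intent explicit: after compaction, the method only says whether any *file* changed. A hypothetical caller skipping an empty commit might look like this (``wctx`` and ``ui`` are assumed; the message text is invented):

if wctx.nofilechanges():
    # nothing left to commit once clean entries are discarded
    ui.status(b'note: not creating an empty commit\n')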
2528 def clean(self):
2528 def clean(self):
2529 self._cache = {}
2529 self._cache = {}
2530
2530
2531 def _compact(self):
2531 def _compact(self):
2532 """Removes keys from the cache that are actually clean, by comparing
2532 """Removes keys from the cache that are actually clean, by comparing
2533 them with the underlying context.
2533 them with the underlying context.
2534
2534
2535 Clean entries can appear during the merge process, e.g. when passing
2535 Clean entries can appear during the merge process, e.g. when passing
2536 --tool :local to resolve a conflict.
2536 --tool :local to resolve a conflict.
2537 """
2537 """
2538 keys = []
2538 keys = []
2539 # This won't be perfect, but can help performance significantly when
2539 # This won't be perfect, but can help performance significantly when
2540 # using things like remotefilelog.
2540 # using things like remotefilelog.
2541 scmutil.prefetchfiles(
2541 scmutil.prefetchfiles(
2542 self.repo(),
2542 self.repo(),
2543 [
2543 [
2544 (
2544 (
2545 self.p1().rev(),
2545 self.p1().rev(),
2546 scmutil.matchfiles(self.repo(), self._cache.keys()),
2546 scmutil.matchfiles(self.repo(), self._cache.keys()),
2547 )
2547 )
2548 ],
2548 ],
2549 )
2549 )
2550
2550
2551 for path in self._cache.keys():
2551 for path in self._cache.keys():
2552 cache = self._cache[path]
2552 cache = self._cache[path]
2553 try:
2553 try:
2554 underlying = self._wrappedctx[path]
2554 underlying = self._wrappedctx[path]
2555 if (
2555 if (
2556 underlying.data() == cache[b'data']
2556 underlying.data() == cache[b'data']
2557 and underlying.flags() == cache[b'flags']
2557 and underlying.flags() == cache[b'flags']
2558 ):
2558 ):
2559 keys.append(path)
2559 keys.append(path)
2560 except error.ManifestLookupError:
2560 except error.ManifestLookupError:
2561 # Path not in the underlying manifest (created).
2561 # Path not in the underlying manifest (created).
2562 continue
2562 continue
2563
2563
2564 for path in keys:
2564 for path in keys:
2565 del self._cache[path]
2565 del self._cache[path]
2566 return keys
2566 return keys
2567
2567
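A plain-Python illustration of the compaction rule (dicts stand in for the cache and the wrapped context; not Mercurial objects): an entry whose data and flags match the underlying context is clean and gets dropped, which is exactly what happens when ``--tool :local`` resolves a conflict back to the parent's content.

# Illustrative stand-ins, not Mercurial objects.
underlying = {b'a': (b'old\n', b''), b'b': (b'same\n', b'')}
cache = {
    b'a': {b'data': b'new\n', b'flags': b''},   # genuinely dirty
    b'b': {b'data': b'same\n', b'flags': b''},  # clean again, e.g. :local
}
clean = [
    path for path, entry in cache.items()
    if path in underlying
    and underlying[path] == (entry[b'data'], entry[b'flags'])
]
for path in clean:
    del cache[path]
assert list(cache) == [b'a']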
2568 def _markdirty(
2568 def _markdirty(
2569 self, path, exists, data=None, date=None, flags=b'', copied=None
2569 self, path, exists, data=None, date=None, flags=b'', copied=None
2570 ):
2570 ):
2571 # If data is not provided, see whether we already have some; if not,
2571 # If data is not provided, see whether we already have some; if not,
2572 # grab it from our underlying context, so that we always have data if
2572 # grab it from our underlying context, so that we always have data if
2573 # the file is marked as existing.
2573 # the file is marked as existing.
2574 if exists and data is None:
2574 if exists and data is None:
2575 oldentry = self._cache.get(path) or {}
2575 oldentry = self._cache.get(path) or {}
2576 data = oldentry.get(b'data')
2576 data = oldentry.get(b'data')
2577 if data is None:
2577 if data is None:
2578 data = self._wrappedctx[path].data()
2578 data = self._wrappedctx[path].data()
2579
2579
2580 self._cache[path] = {
2580 self._cache[path] = {
2581 b'exists': exists,
2581 b'exists': exists,
2582 b'data': data,
2582 b'data': data,
2583 b'date': date,
2583 b'date': date,
2584 b'flags': flags,
2584 b'flags': flags,
2585 b'copied': copied,
2585 b'copied': copied,
2586 }
2586 }
2587
2587
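The data fallback above matters for metadata-only operations such as ``setflags()``: marking a file dirty without passing data must reuse the previously cached bytes rather than storing None. A hypothetical trace (dict shapes as in the code, simplified to a single fallback value):

cache = {}

def markdirty(path, exists, data=None, flags=b'', wrapped_data=b'parent\n'):
    if exists and data is None:
        oldentry = cache.get(path) or {}
        data = oldentry.get(b'data')
        if data is None:
            data = wrapped_data  # stands in for self._wrappedctx[path].data()
    cache[path] = {b'exists': exists, b'data': data, b'flags': flags}

markdirty(b'f', exists=True, data=b'new\n')   # write(): data supplied
markdirty(b'f', exists=True, flags=b'x')      # setflags(): no data passed
assert cache[b'f'][b'data'] == b'new\n'       # earlier data is preserved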
2588 def filectx(self, path, filelog=None):
2588 def filectx(self, path, filelog=None):
2589 return overlayworkingfilectx(
2589 return overlayworkingfilectx(
2590 self._repo, path, parent=self, filelog=filelog
2590 self._repo, path, parent=self, filelog=filelog
2591 )
2591 )
2592
2592
2593
2593
2594 class overlayworkingfilectx(committablefilectx):
2594 class overlayworkingfilectx(committablefilectx):
2595 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2595 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2596 cache, which can be flushed through later by calling ``flush()``."""
2596 cache, which can be flushed through later by calling ``flush()``."""
2597
2597
2598 def __init__(self, repo, path, filelog=None, parent=None):
2598 def __init__(self, repo, path, filelog=None, parent=None):
2599 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2599 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2600 self._repo = repo
2600 self._repo = repo
2601 self._parent = parent
2601 self._parent = parent
2602 self._path = path
2602 self._path = path
2603
2603
2604 def cmp(self, fctx):
2604 def cmp(self, fctx):
2605 return self.data() != fctx.data()
2605 return self.data() != fctx.data()
2606
2606
2607 def changectx(self):
2607 def changectx(self):
2608 return self._parent
2608 return self._parent
2609
2609
2610 def data(self):
2610 def data(self):
2611 return self._parent.data(self._path)
2611 return self._parent.data(self._path)
2612
2612
2613 def date(self):
2613 def date(self):
2614 return self._parent.filedate(self._path)
2614 return self._parent.filedate(self._path)
2615
2615
2616 def exists(self):
2616 def exists(self):
2617 return self.lexists()
2617 return self.lexists()
2618
2618
2619 def lexists(self):
2619 def lexists(self):
2620 return self._parent.exists(self._path)
2620 return self._parent.exists(self._path)
2621
2621
2622 def copysource(self):
2622 def copysource(self):
2623 return self._parent.copydata(self._path)
2623 return self._parent.copydata(self._path)
2624
2624
2625 def size(self):
2625 def size(self):
2626 return self._parent.size(self._path)
2626 return self._parent.size(self._path)
2627
2627
2628 def markcopied(self, origin):
2628 def markcopied(self, origin):
2629 self._parent.markcopied(self._path, origin)
2629 self._parent.markcopied(self._path, origin)
2630
2630
2631 def audit(self):
2631 def audit(self):
2632 pass
2632 pass
2633
2633
2634 def flags(self):
2634 def flags(self):
2635 return self._parent.flags(self._path)
2635 return self._parent.flags(self._path)
2636
2636
2637 def setflags(self, islink, isexec):
2637 def setflags(self, islink, isexec):
2638 return self._parent.setflags(self._path, islink, isexec)
2638 return self._parent.setflags(self._path, islink, isexec)
2639
2639
2640 def write(self, data, flags, backgroundclose=False, **kwargs):
2640 def write(self, data, flags, backgroundclose=False, **kwargs):
2641 return self._parent.write(self._path, data, flags, **kwargs)
2641 return self._parent.write(self._path, data, flags, **kwargs)
2642
2642
2643 def remove(self, ignoremissing=False):
2643 def remove(self, ignoremissing=False):
2644 return self._parent.remove(self._path)
2644 return self._parent.remove(self._path)
2645
2645
2646 def clearunknown(self):
2646 def clearunknown(self):
2647 pass
2647 pass
2648
2648
2649
2649
2650 class workingcommitctx(workingctx):
2650 class workingcommitctx(workingctx):
2651 """A workingcommitctx object makes access to data related to
2651 """A workingcommitctx object makes access to data related to
2652 the revision being committed convenient.
2652 the revision being committed convenient.
2653
2653
2654 This hides changes in the working directory, if they aren't
2654 This hides changes in the working directory, if they aren't
2655 committed in this context.
2655 committed in this context.
2656 """
2656 """
2657
2657
2658 def __init__(
2658 def __init__(
2659 self, repo, changes, text=b"", user=None, date=None, extra=None
2659 self, repo, changes, text=b"", user=None, date=None, extra=None
2660 ):
2660 ):
2661 super(workingcommitctx, self).__init__(
2661 super(workingcommitctx, self).__init__(
2662 repo, text, user, date, extra, changes
2662 repo, text, user, date, extra, changes
2663 )
2663 )
2664
2664
2665 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2665 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2666 """Return matched files only in ``self._status``
2666 """Return matched files only in ``self._status``
2667
2667
2668 Uncommitted files appear "clean" via this context, even if
2668 Uncommitted files appear "clean" via this context, even if
2669 they aren't actually so in the working directory.
2669 they aren't actually so in the working directory.
2670 """
2670 """
2671 if clean:
2671 if clean:
2672 clean = [f for f in self._manifest if f not in self._changedset]
2672 clean = [f for f in self._manifest if f not in self._changedset]
2673 else:
2673 else:
2674 clean = []
2674 clean = []
2675 return scmutil.status(
2675 return scmutil.status(
2676 [f for f in self._status.modified if match(f)],
2676 [f for f in self._status.modified if match(f)],
2677 [f for f in self._status.added if match(f)],
2677 [f for f in self._status.added if match(f)],
2678 [f for f in self._status.removed if match(f)],
2678 [f for f in self._status.removed if match(f)],
2679 [],
2679 [],
2680 [],
2680 [],
2681 [],
2681 [],
2682 clean,
2682 clean,
2683 )
2683 )
2684
2684
2685 @propertycache
2685 @propertycache
2686 def _changedset(self):
2686 def _changedset(self):
2687 """Return the set of files changed in this context
2687 """Return the set of files changed in this context
2688 """
2688 """
2689 changed = set(self._status.modified)
2689 changed = set(self._status.modified)
2690 changed.update(self._status.added)
2690 changed.update(self._status.added)
2691 changed.update(self._status.removed)
2691 changed.update(self._status.removed)
2692 return changed
2692 return changed
2693
2693
2694
2694
2695 def makecachingfilectxfn(func):
2695 def makecachingfilectxfn(func):
2696 """Create a filectxfn that caches based on the path.
2696 """Create a filectxfn that caches based on the path.
2697
2697
2698 We can't use util.cachefunc because it uses all arguments as the cache
2698 We can't use util.cachefunc because it uses all arguments as the cache
2699 key and this creates a cycle since the arguments include the repo and
2699 key and this creates a cycle since the arguments include the repo and
2700 memctx.
2700 memctx.
2701 """
2701 """
2702 cache = {}
2702 cache = {}
2703
2703
2704 def getfilectx(repo, memctx, path):
2704 def getfilectx(repo, memctx, path):
2705 if path not in cache:
2705 if path not in cache:
2706 cache[path] = func(repo, memctx, path)
2706 cache[path] = func(repo, memctx, path)
2707 return cache[path]
2707 return cache[path]
2708
2708
2709 return getfilectx
2709 return getfilectx
2710
2710
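A small illustration of why the path-only key suffices: the callback below is hypothetical, but repeated lookups of the same path during a commit hit the cache even though ``repo`` and ``memctx`` are passed on every call.

calls = []

def expensive(repo, memctx, path):
    calls.append(path)
    return path  # a real callback would return a memfilectx or None

cached = makecachingfilectxfn(expensive)
cached(None, None, b'a')
cached(None, None, b'a')  # second lookup served from the path-keyed cache
assert calls == [b'a']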
2711
2711
2712 def memfilefromctx(ctx):
2712 def memfilefromctx(ctx):
2713 """Given a context return a memfilectx for ctx[path]
2713 """Given a context return a memfilectx for ctx[path]
2714
2714
2715 This is a convenience method for building a memctx based on another
2715 This is a convenience method for building a memctx based on another
2716 context.
2716 context.
2717 """
2717 """
2718
2718
2719 def getfilectx(repo, memctx, path):
2719 def getfilectx(repo, memctx, path):
2720 fctx = ctx[path]
2720 fctx = ctx[path]
2721 copysource = fctx.copysource()
2721 copysource = fctx.copysource()
2722 return memfilectx(
2722 return memfilectx(
2723 repo,
2723 repo,
2724 memctx,
2724 memctx,
2725 path,
2725 path,
2726 fctx.data(),
2726 fctx.data(),
2727 islink=fctx.islink(),
2727 islink=fctx.islink(),
2728 isexec=fctx.isexec(),
2728 isexec=fctx.isexec(),
2729 copysource=copysource,
2729 copysource=copysource,
2730 )
2730 )
2731
2731
2732 return getfilectx
2732 return getfilectx
2733
2733
2734
2734
2735 def memfilefrompatch(patchstore):
2735 def memfilefrompatch(patchstore):
2736 """Given a patch (e.g. patchstore object) return a memfilectx
2736 """Given a patch (e.g. patchstore object) return a memfilectx
2737
2737
2738 This is a convenience method for building a memctx based on a patchstore.
2738 This is a convenience method for building a memctx based on a patchstore.
2739 """
2739 """
2740
2740
2741 def getfilectx(repo, memctx, path):
2741 def getfilectx(repo, memctx, path):
2742 data, mode, copysource = patchstore.getfile(path)
2742 data, mode, copysource = patchstore.getfile(path)
2743 if data is None:
2743 if data is None:
2744 return None
2744 return None
2745 islink, isexec = mode
2745 islink, isexec = mode
2746 return memfilectx(
2746 return memfilectx(
2747 repo,
2747 repo,
2748 memctx,
2748 memctx,
2749 path,
2749 path,
2750 data,
2750 data,
2751 islink=islink,
2751 islink=islink,
2752 isexec=isexec,
2752 isexec=isexec,
2753 copysource=copysource,
2753 copysource=copysource,
2754 )
2754 )
2755
2755
2756 return getfilectx
2756 return getfilectx
2757
2757
2758
2758
2759 class memctx(committablectx):
2759 class memctx(committablectx):
2760 """Use memctx to perform in-memory commits via localrepo.commitctx().
2760 """Use memctx to perform in-memory commits via localrepo.commitctx().
2761
2761
2762 Revision information is supplied at initialization time, while
2762 Revision information is supplied at initialization time, while
2763 related file data is made available through a callback
2763 related file data is made available through a callback
2764 mechanism. 'repo' is the current localrepo, 'parents' is a
2764 mechanism. 'repo' is the current localrepo, 'parents' is a
2765 sequence of two parent revision identifiers (pass None for every
2765 sequence of two parent revision identifiers (pass None for every
2766 missing parent), 'text' is the commit message and 'files' lists
2766 missing parent), 'text' is the commit message and 'files' lists
2767 names of files touched by the revision (normalized and relative to
2767 names of files touched by the revision (normalized and relative to
2768 repository root).
2768 repository root).
2769
2769
2770 filectxfn(repo, memctx, path) is a callable receiving the
2770 filectxfn(repo, memctx, path) is a callable receiving the
2771 repository, the current memctx object and the normalized path of
2771 repository, the current memctx object and the normalized path of
2772 requested file, relative to repository root. It is fired by the
2772 requested file, relative to repository root. It is fired by the
2773 commit function for every file in 'files', but the order of calls is
2773 commit function for every file in 'files', but the order of calls is
2774 undefined. If the file is available in the revision being
2774 undefined. If the file is available in the revision being
2775 committed (updated or added), filectxfn returns a memfilectx
2775 committed (updated or added), filectxfn returns a memfilectx
2776 object. If the file was removed, filectxfn returns None in recent
2776 object. If the file was removed, filectxfn returns None in recent
2777 Mercurial. Moved files are represented by marking the source file
2777 Mercurial. Moved files are represented by marking the source file
2778 removed and the new file added with copy information (see
2778 removed and the new file added with copy information (see
2779 memfilectx).
2779 memfilectx).
2780
2780
2781 user receives the committer name and defaults to current
2781 user receives the committer name and defaults to current
2782 repository username, date is the commit date in any format
2782 repository username, date is the commit date in any format
2783 supported by dateutil.parsedate() and defaults to current date, extra
2783 supported by dateutil.parsedate() and defaults to current date, extra
2784 is a dictionary of metadata or is left empty.
2784 is a dictionary of metadata or is left empty.
2785 """
2785 """
2786
2786
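A hedged end-to-end sketch (``repo`` is assumed to be an open localrepo; file name and message are invented): committing one new file entirely in memory, following the ``files``/``filectxfn`` contract described in the docstring above.

def filectxfn(repo, memctx, path):
    # file is available in the revision being committed -> memfilectx
    return memfilectx(repo, memctx, path, b'hello\n')

ctx = memctx(
    repo,
    parents=(repo[b'.'].node(), None),  # None fills in the missing parent
    text=b'in-memory commit',
    files=[b'hello.txt'],
    filectxfn=filectxfn,
)
newnode = ctx.commit()  # delegates to repo.commitctx(self)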
2787 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2787 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2788 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2788 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2789 # this field to determine what to do in filectxfn.
2789 # this field to determine what to do in filectxfn.
2790 _returnnoneformissingfiles = True
2790 _returnnoneformissingfiles = True
2791
2791
2792 def __init__(
2792 def __init__(
2793 self,
2793 self,
2794 repo,
2794 repo,
2795 parents,
2795 parents,
2796 text,
2796 text,
2797 files,
2797 files,
2798 filectxfn,
2798 filectxfn,
2799 user=None,
2799 user=None,
2800 date=None,
2800 date=None,
2801 extra=None,
2801 extra=None,
2802 branch=None,
2802 branch=None,
2803 editor=None,
2803 editor=None,
2804 ):
2804 ):
2805 super(memctx, self).__init__(
2805 super(memctx, self).__init__(
2806 repo, text, user, date, extra, branch=branch
2806 repo, text, user, date, extra, branch=branch
2807 )
2807 )
2808 self._rev = None
2808 self._rev = None
2809 self._node = None
2809 self._node = None
2810 parents = [(p or nullid) for p in parents]
2810 parents = [(p or nullid) for p in parents]
2811 p1, p2 = parents
2811 p1, p2 = parents
2812 self._parents = [self._repo[p] for p in (p1, p2)]
2812 self._parents = [self._repo[p] for p in (p1, p2)]
2813 files = sorted(set(files))
2813 files = sorted(set(files))
2814 self._files = files
2814 self._files = files
2815 self.substate = {}
2815 self.substate = {}
2816
2816
2817 if isinstance(filectxfn, patch.filestore):
2817 if isinstance(filectxfn, patch.filestore):
2818 filectxfn = memfilefrompatch(filectxfn)
2818 filectxfn = memfilefrompatch(filectxfn)
2819 elif not callable(filectxfn):
2819 elif not callable(filectxfn):
2820 # if store is not callable, wrap it in a function
2820 # if store is not callable, wrap it in a function
2821 filectxfn = memfilefromctx(filectxfn)
2821 filectxfn = memfilefromctx(filectxfn)
2822
2822
2823 # memoizing increases performance for e.g. vcs convert scenarios.
2823 # memoizing increases performance for e.g. vcs convert scenarios.
2824 self._filectxfn = makecachingfilectxfn(filectxfn)
2824 self._filectxfn = makecachingfilectxfn(filectxfn)
2825
2825
2826 if editor:
2826 if editor:
2827 self._text = editor(self._repo, self, [])
2827 self._text = editor(self._repo, self, [])
2828 self._repo.savecommitmessage(self._text)
2828 self._repo.savecommitmessage(self._text)
2829
2829
2830 def filectx(self, path, filelog=None):
2830 def filectx(self, path, filelog=None):
2831 """get a file context from the working directory
2831 """get a file context from the working directory
2832
2832
2833 Returns None if file doesn't exist and should be removed."""
2833 Returns None if file doesn't exist and should be removed."""
2834 return self._filectxfn(self._repo, self, path)
2834 return self._filectxfn(self._repo, self, path)
2835
2835
2836 def commit(self):
2836 def commit(self):
2837 """commit context to the repo"""
2837 """commit context to the repo"""
2838 return self._repo.commitctx(self)
2838 return self._repo.commitctx(self)
2839
2839
2840 @propertycache
2840 @propertycache
2841 def _manifest(self):
2841 def _manifest(self):
2842 """generate a manifest based on the return values of filectxfn"""
2842 """generate a manifest based on the return values of filectxfn"""
2843
2843
2844 # keep this simple for now; just worry about p1
2844 # keep this simple for now; just worry about p1
2845 pctx = self._parents[0]
2845 pctx = self._parents[0]
2846 man = pctx.manifest().copy()
2846 man = pctx.manifest().copy()
2847
2847
2848 for f in self._status.modified:
2848 for f in self._status.modified:
2849 man[f] = modifiednodeid
2849 man[f] = modifiednodeid
2850
2850
2851 for f in self._status.added:
2851 for f in self._status.added:
2852 man[f] = addednodeid
2852 man[f] = addednodeid
2853
2853
2854 for f in self._status.removed:
2854 for f in self._status.removed:
2855 if f in man:
2855 if f in man:
2856 del man[f]
2856 del man[f]
2857
2857
2858 return man
2858 return man
2859
2859
2860 @propertycache
2860 @propertycache
2861 def _status(self):
2861 def _status(self):
2862 """Calculate exact status from ``files`` specified at construction
2862 """Calculate exact status from ``files`` specified at construction
2863 """
2863 """
2864 man1 = self.p1().manifest()
2864 man1 = self.p1().manifest()
2865 p2 = self._parents[1]
2865 p2 = self._parents[1]
2866 # "1 < len(self._parents)" can't be used for checking
2866 # "1 < len(self._parents)" can't be used for checking
2867 # existence of the 2nd parent, because "memctx._parents" is
2867 # existence of the 2nd parent, because "memctx._parents" is
2868 # explicitly initialized as a list of length 2.
2868 # explicitly initialized as a list of length 2.
2869 if p2.node() != nullid:
2869 if p2.node() != nullid:
2870 man2 = p2.manifest()
2870 man2 = p2.manifest()
2871 managing = lambda f: f in man1 or f in man2
2871 managing = lambda f: f in man1 or f in man2
2872 else:
2872 else:
2873 managing = lambda f: f in man1
2873 managing = lambda f: f in man1
2874
2874
2875 modified, added, removed = [], [], []
2875 modified, added, removed = [], [], []
2876 for f in self._files:
2876 for f in self._files:
2877 if not managing(f):
2877 if not managing(f):
2878 added.append(f)
2878 added.append(f)
2879 elif self[f]:
2879 elif self[f]:
2880 modified.append(f)
2880 modified.append(f)
2881 else:
2881 else:
2882 removed.append(f)
2882 removed.append(f)
2883
2883
2884 return scmutil.status(modified, added, removed, [], [], [], [])
2884 return scmutil.status(modified, added, removed, [], [], [], [])
2885
2885
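The loop above reduces to a three-way split; a plain-Python rendering for the single-parent case (sets stand in for the p1 manifest and for the truthiness of ``self[f]``):

man1 = {b'a', b'b'}              # files managed in p1
present = {b'a', b'c'}           # files that exist in this context
touched = [b'a', b'b', b'c']     # the `files` list given at construction

added = [f for f in touched if f not in man1]
modified = [f for f in touched if f in man1 and f in present]
removed = [f for f in touched if f in man1 and f not in present]
assert (modified, added, removed) == ([b'a'], [b'c'], [b'b'])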
2886
2886
2887 class memfilectx(committablefilectx):
2887 class memfilectx(committablefilectx):
2888 """memfilectx represents an in-memory file to commit.
2888 """memfilectx represents an in-memory file to commit.
2889
2889
2890 See memctx and committablefilectx for more details.
2890 See memctx and committablefilectx for more details.
2891 """
2891 """
2892
2892
2893 def __init__(
2893 def __init__(
2894 self,
2894 self,
2895 repo,
2895 repo,
2896 changectx,
2896 changectx,
2897 path,
2897 path,
2898 data,
2898 data,
2899 islink=False,
2899 islink=False,
2900 isexec=False,
2900 isexec=False,
2901 copysource=None,
2901 copysource=None,
2902 ):
2902 ):
2903 """
2903 """
2904 path is the normalized file path relative to repository root.
2904 path is the normalized file path relative to repository root.
2905 data is the file content as a string.
2905 data is the file content as a string.
2906 islink is True if the file is a symbolic link.
2906 islink is True if the file is a symbolic link.
2907 isexec is True if the file is executable.
2907 isexec is True if the file is executable.
2908 copysource is the source file path if the current file was copied in the
2908 copysource is the source file path if the current file was copied in the
2909 revision being committed, or None."""
2909 revision being committed, or None."""
2910 super(memfilectx, self).__init__(repo, path, None, changectx)
2910 super(memfilectx, self).__init__(repo, path, None, changectx)
2911 self._data = data
2911 self._data = data
2912 if islink:
2912 if islink:
2913 self._flags = b'l'
2913 self._flags = b'l'
2914 elif isexec:
2914 elif isexec:
2915 self._flags = b'x'
2915 self._flags = b'x'
2916 else:
2916 else:
2917 self._flags = b''
2917 self._flags = b''
2918 self._copysource = copysource
2918 self._copysource = copysource
2919
2919
2920 def copysource(self):
2920 def copysource(self):
2921 return self._copysource
2921 return self._copysource
2922
2922
2923 def cmp(self, fctx):
2923 def cmp(self, fctx):
2924 return self.data() != fctx.data()
2924 return self.data() != fctx.data()
2925
2925
2926 def data(self):
2926 def data(self):
2927 return self._data
2927 return self._data
2928
2928
2929 def remove(self, ignoremissing=False):
2929 def remove(self, ignoremissing=False):
2930 """wraps unlink for a repo's working directory"""
2930 """wraps unlink for a repo's working directory"""
2931 # need to figure out what to do here
2931 # need to figure out what to do here
2932 del self._changectx[self._path]
2932 del self._changectx[self._path]
2933
2933
2934 def write(self, data, flags, **kwargs):
2934 def write(self, data, flags, **kwargs):
2935 """wraps repo.wwrite"""
2935 """wraps repo.wwrite"""
2936 self._data = data
2936 self._data = data
2937
2937
2938
2938
2939 class metadataonlyctx(committablectx):
2939 class metadataonlyctx(committablectx):
2940 """Like memctx but it's reusing the manifest of different commit.
2940 """Like memctx but it's reusing the manifest of different commit.
2941 Intended to be used by lightweight operations that are creating
2941 Intended to be used by lightweight operations that are creating
2942 metadata-only changes.
2942 metadata-only changes.
2943
2943
2944 Revision information is supplied at initialization time. 'repo' is the
2944 Revision information is supplied at initialization time. 'repo' is the
2945 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2945 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2946 'parents' is a sequence of two parent revision identifiers (pass None for
2946 'parents' is a sequence of two parent revision identifiers (pass None for
2947 every missing parent), 'text' is the commit message.
2947 every missing parent), 'text' is the commit message.
2948
2948
2949 user receives the committer name and defaults to current repository
2949 user receives the committer name and defaults to current repository
2950 username, date is the commit date in any format supported by
2950 username, date is the commit date in any format supported by
2951 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2951 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2952 metadata or is left empty.
2952 metadata or is left empty.
2953 """
2953 """
2954
2954
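A hedged sketch of the intended use (``repo`` and an existing changectx ``old`` are assumed): rewriting only the description while reusing ``old``'s manifest, so no file content is re-hashed.

new = metadataonlyctx(
    repo,
    old,                                  # manifest of `old` is reused as-is
    parents=[p.node() for p in old.parents()],
    text=b'reworded commit message',
)
newnode = new.commit()                    # delegates to repo.commitctx(new)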
2955 def __init__(
2955 def __init__(
2956 self,
2956 self,
2957 repo,
2957 repo,
2958 originalctx,
2958 originalctx,
2959 parents=None,
2959 parents=None,
2960 text=None,
2960 text=None,
2961 user=None,
2961 user=None,
2962 date=None,
2962 date=None,
2963 extra=None,
2963 extra=None,
2964 editor=None,
2964 editor=None,
2965 ):
2965 ):
2966 if text is None:
2966 if text is None:
2967 text = originalctx.description()
2967 text = originalctx.description()
2968 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2968 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2969 self._rev = None
2969 self._rev = None
2970 self._node = None
2970 self._node = None
2971 self._originalctx = originalctx
2971 self._originalctx = originalctx
2972 self._manifestnode = originalctx.manifestnode()
2972 self._manifestnode = originalctx.manifestnode()
2973 if parents is None:
2973 if parents is None:
2974 parents = originalctx.parents()
2974 parents = originalctx.parents()
2975 else:
2975 else:
2976 parents = [repo[p] for p in parents if p is not None]
2976 parents = [repo[p] for p in parents if p is not None]
2977 parents = parents[:]
2977 parents = parents[:]
2978 while len(parents) < 2:
2978 while len(parents) < 2:
2979 parents.append(repo[nullid])
2979 parents.append(repo[nullid])
2980 p1, p2 = self._parents = parents
2980 p1, p2 = self._parents = parents
2981
2981
2982 # sanity check to ensure that the reused manifest parents are
2982 # sanity check to ensure that the reused manifest parents are
2983 # manifests of our commit parents
2983 # manifests of our commit parents
2984 mp1, mp2 = self.manifestctx().parents
2984 mp1, mp2 = self.manifestctx().parents
2985 if p1 != nullid and p1.manifestnode() != mp1:
2985 if p1 != nullid and p1.manifestnode() != mp1:
2986 raise RuntimeError(
2986 raise RuntimeError(
2987 r"can't reuse the manifest: its p1 "
2987 r"can't reuse the manifest: its p1 "
2988 r"doesn't match the new ctx p1"
2988 r"doesn't match the new ctx p1"
2989 )
2989 )
2990 if p2 != nullid and p2.manifestnode() != mp2:
2990 if p2 != nullid and p2.manifestnode() != mp2:
2991 raise RuntimeError(
2991 raise RuntimeError(
2992 r"can't reuse the manifest: "
2992 r"can't reuse the manifest: "
2993 r"its p2 doesn't match the new ctx p2"
2993 r"its p2 doesn't match the new ctx p2"
2994 )
2994 )
2995
2995
2996 self._files = originalctx.files()
2996 self._files = originalctx.files()
2997 self.substate = {}
2997 self.substate = {}
2998
2998
2999 if editor:
2999 if editor:
3000 self._text = editor(self._repo, self, [])
3000 self._text = editor(self._repo, self, [])
3001 self._repo.savecommitmessage(self._text)
3001 self._repo.savecommitmessage(self._text)
3002
3002
3003 def manifestnode(self):
3003 def manifestnode(self):
3004 return self._manifestnode
3004 return self._manifestnode
3005
3005
3006 @property
3006 @property
3007 def _manifestctx(self):
3007 def _manifestctx(self):
3008 return self._repo.manifestlog[self._manifestnode]
3008 return self._repo.manifestlog[self._manifestnode]
3009
3009
3010 def filectx(self, path, filelog=None):
3010 def filectx(self, path, filelog=None):
3011 return self._originalctx.filectx(path, filelog=filelog)
3011 return self._originalctx.filectx(path, filelog=filelog)
3012
3012
3013 def commit(self):
3013 def commit(self):
3014 """commit context to the repo"""
3014 """commit context to the repo"""
3015 return self._repo.commitctx(self)
3015 return self._repo.commitctx(self)
3016
3016
3017 @property
3017 @property
3018 def _manifest(self):
3018 def _manifest(self):
3019 return self._originalctx.manifest()
3019 return self._originalctx.manifest()
3020
3020
3021 @propertycache
3021 @propertycache
3022 def _status(self):
3022 def _status(self):
3023 """Calculate exact status from ``files`` specified in the ``origctx``
3023 """Calculate exact status from ``files`` specified in the ``origctx``
3024 and the parents' manifests.
3024 and the parents' manifests.
3025 """
3025 """
3026 man1 = self.p1().manifest()
3026 man1 = self.p1().manifest()
3027 p2 = self._parents[1]
3027 p2 = self._parents[1]
3028 # "1 < len(self._parents)" can't be used for checking
3028 # "1 < len(self._parents)" can't be used for checking
3029 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3029 # existence of the 2nd parent, because "metadataonlyctx._parents" is
3030 # explicitly initialized as a list of length 2.
3030 # explicitly initialized as a list of length 2.
3031 if p2.node() != nullid:
3031 if p2.node() != nullid:
3032 man2 = p2.manifest()
3032 man2 = p2.manifest()
3033 managing = lambda f: f in man1 or f in man2
3033 managing = lambda f: f in man1 or f in man2
3034 else:
3034 else:
3035 managing = lambda f: f in man1
3035 managing = lambda f: f in man1
3036
3036
3037 modified, added, removed = [], [], []
3037 modified, added, removed = [], [], []
3038 for f in self._files:
3038 for f in self._files:
3039 if not managing(f):
3039 if not managing(f):
3040 added.append(f)
3040 added.append(f)
3041 elif f in self:
3041 elif f in self:
3042 modified.append(f)
3042 modified.append(f)
3043 else:
3043 else:
3044 removed.append(f)
3044 removed.append(f)
3045
3045
3046 return scmutil.status(modified, added, removed, [], [], [], [])
3046 return scmutil.status(modified, added, removed, [], [], [], [])
3047
3047
3048
3048
3049 class arbitraryfilectx(object):
3049 class arbitraryfilectx(object):
3050 """Allows you to use filectx-like functions on a file in an arbitrary
3050 """Allows you to use filectx-like functions on a file in an arbitrary
3051 location on disk, possibly not in the working directory.
3051 location on disk, possibly not in the working directory.
3052 """
3052 """
3053
3053
3054 def __init__(self, path, repo=None):
3054 def __init__(self, path, repo=None):
3055 # Repo is optional because contrib/simplemerge uses this class.
3055 # Repo is optional because contrib/simplemerge uses this class.
3056 self._repo = repo
3056 self._repo = repo
3057 self._path = path
3057 self._path = path
3058
3058
3059 def cmp(self, fctx):
3059 def cmp(self, fctx):
3060 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3060 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
3061 # path if either side is a symlink.
3061 # path if either side is a symlink.
3062 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3062 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
3063 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3063 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
3064 # Add a fast-path for merge if both sides are disk-backed.
3064 # Add a fast-path for merge if both sides are disk-backed.
3065 # Note that filecmp uses the opposite return values (True if same)
3065 # Note that filecmp uses the opposite return values (True if same)
3066 # from our cmp functions (True if different).
3066 # from our cmp functions (True if different).
3067 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3067 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3068 return self.data() != fctx.data()
3068 return self.data() != fctx.data()
3069
3069
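The polarity note above is easy to trip over; a standalone check (temporary files on a POSIX system, nothing Mercurial-specific):

import filecmp
import tempfile

with tempfile.NamedTemporaryFile() as f1, tempfile.NamedTemporaryFile() as f2:
    f1.write(b'same\n'); f1.flush()
    f2.write(b'same\n'); f2.flush()
    # filecmp.cmp returns True when the files are equal...
    assert filecmp.cmp(f1.name, f2.name, shallow=False)
    # ...whereas cmp() in this module returns True when they *differ*.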
3070 def path(self):
3070 def path(self):
3071 return self._path
3071 return self._path
3072
3072
3073 def flags(self):
3073 def flags(self):
3074 return b''
3074 return b''
3075
3075
3076 def data(self):
3076 def data(self):
3077 return util.readfile(self._path)
3077 return util.readfile(self._path)
3078
3078
3079 def decodeddata(self):
3079 def decodeddata(self):
3080 with open(self._path, b"rb") as f:
3080 with open(self._path, b"rb") as f:
3081 return f.read()
3081 return f.read()
3082
3082
3083 def remove(self):
3083 def remove(self):
3084 util.unlink(self._path)
3084 util.unlink(self._path)
3085
3085
3086 def write(self, data, flags, **kwargs):
3086 def write(self, data, flags, **kwargs):
3087 assert not flags
3087 assert not flags
3088 with open(self._path, b"wb") as f:
3088 with open(self._path, b"wb") as f:
3089 f.write(data)
3089 f.write(data)