##// END OF EJS Templates
rebase: support "history-editing-backup" config option...
Sushil khanchi -
r38835:2002c193 default
parent child Browse files
Show More
@@ -0,0 +1,150 b''
1 $ cat << EOF >> $HGRCPATH
2 > [extensions]
3 > rebase=
4 > EOF
5
6 ==========================================
7 Test history-editing-backup config option
8 ==========================================
9 Test with Pre-obsmarker rebase:
10 1) When config option is not set:
11 $ hg init repo1
12 $ cd repo1
13 $ echo a>a
14 $ hg ci -qAma
15 $ echo b>b
16 $ hg ci -qAmb
17 $ echo c>c
18 $ hg ci -qAmc
19 $ hg up 0 -q
20 $ echo d>d
21 $ hg ci -qAmd
22 $ echo e>e
23 $ hg ci -qAme
24 $ hg log -GT "{rev}: {firstline(desc)}\n"
25 @ 4: e
26 |
27 o 3: d
28 |
29 | o 2: c
30 | |
31 | o 1: b
32 |/
33 o 0: a
34
35 $ hg rebase -s 1 -d .
36 rebasing 1:d2ae7f538514 "b"
37 rebasing 2:177f92b77385 "c"
38 saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/d2ae7f538514-c7ed7a78-rebase.hg
39 $ hg log -GT "{rev}: {firstline(desc)}\n"
40 o 4: c
41 |
42 o 3: b
43 |
44 @ 2: e
45 |
46 o 1: d
47 |
48 o 0: a
49
50
51 2) When config option is set:
52 $ cat << EOF >> $HGRCPATH
53 > [ui]
54 > history-editing-backup = False
55 > EOF
56
57 $ echo f>f
58 $ hg ci -Aqmf
59 $ echo g>g
60 $ hg ci -Aqmg
61 $ hg log -GT "{rev}: {firstline(desc)}\n"
62 @ 6: g
63 |
64 o 5: f
65 |
66 | o 4: c
67 | |
68 | o 3: b
69 |/
70 o 2: e
71 |
72 o 1: d
73 |
74 o 0: a
75
76 $ hg rebase -s 3 -d .
77 rebasing 3:05bff2a95b12 "b"
78 rebasing 4:1762bde4404d "c"
79
80 $ hg log -GT "{rev}: {firstline(desc)}\n"
81 o 6: c
82 |
83 o 5: b
84 |
85 @ 4: g
86 |
87 o 3: f
88 |
89 o 2: e
90 |
91 o 1: d
92 |
93 o 0: a
94
95 Test when rebased revisions are stripped during abort:
96 ======================================================
97
98 $ echo conflict > c
99 $ hg ci -Am "conflict with c"
100 adding c
101 created new head
102 $ hg log -GT "{rev}: {firstline(desc)}\n"
103 @ 7: conflict with c
104 |
105 | o 6: c
106 | |
107 | o 5: b
108 |/
109 o 4: g
110 |
111 o 3: f
112 |
113 o 2: e
114 |
115 o 1: d
116 |
117 o 0: a
118
119 When history-editing-backup = True:
120 $ cat << EOF >> $HGRCPATH
121 > [ui]
122 > history-editing-backup = True
123 > EOF
124 $ hg rebase -s 5 -d .
125 rebasing 5:1f8148a544ee "b"
126 rebasing 6:f8bc7d28e573 "c"
127 merging c
128 warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
129 unresolved conflicts (see hg resolve, then hg rebase --continue)
130 [1]
131 $ hg rebase --abort
132 saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg
133 rebase aborted
134
135 When history-editing-backup = False:
136 $ cat << EOF >> $HGRCPATH
137 > [ui]
138 > history-editing-backup = False
139 > EOF
140 $ hg rebase -s 5 -d .
141 rebasing 5:1f8148a544ee "b"
142 rebasing 6:f8bc7d28e573 "c"
143 merging c
144 warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
145 unresolved conflicts (see hg resolve, then hg rebase --continue)
146 [1]
147 $ hg rebase --abort
148 rebase aborted
149 $ cd ..
150
@@ -1,1911 +1,1922 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 )
27 27 from mercurial import (
28 28 bookmarks,
29 29 cmdutil,
30 30 commands,
31 31 copies,
32 32 destutil,
33 33 dirstateguard,
34 34 error,
35 35 extensions,
36 36 hg,
37 37 merge as mergemod,
38 38 mergeutil,
39 39 obsolete,
40 40 obsutil,
41 41 patch,
42 42 phases,
43 43 pycompat,
44 44 registrar,
45 45 repair,
46 46 revset,
47 47 revsetlang,
48 48 scmutil,
49 49 smartset,
50 50 state as statemod,
51 51 util,
52 52 )
53 53
# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.

# Indicates that a revision needs to be rebased
revtodo = -1
revtodostr = '-1'

# legacy revstates no longer needed in current code
# -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
legacystates = {'-2', '-3', '-4', '-5'}

# command table populated by the @command decorator below
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
72 72
73 73 def _nothingtorebase():
74 74 return 1
75 75
76 76 def _savegraft(ctx, extra):
77 77 s = ctx.extra().get('source', None)
78 78 if s is not None:
79 79 extra['source'] = s
80 80 s = ctx.extra().get('intermediate-source', None)
81 81 if s is not None:
82 82 extra['intermediate-source'] = s
83 83
84 84 def _savebranch(ctx, extra):
85 85 extra['branch'] = ctx.branch()
86 86
def _destrebase(repo, sourceset, destspace=None):
    """small wrapper around destmerge to pass the right extra args

    Please wrap destutil.destmerge instead."""
    kwargs = {
        'action': 'rebase',
        'sourceset': sourceset,
        'onheadcheck': False,
        'destspace': destspace,
    }
    return destutil.destmerge(repo, **kwargs)
93 93
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('_destrebase')
def _revsetdestrebase(repo, subset, x):
    # ``_rebasedefaultdest()``

    # default destination for rebase.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # i18n: "_rebasedefaultdest" is a keyword
    if x is None:
        sourceset = None
    else:
        sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
    return subset & smartset.baseset([_destrebase(repo, sourceset)])
108 108
@revsetpredicate('_destautoorphanrebase')
def _revsetdestautoorphanrebase(repo, subset, x):
    """automatic rebase destination for a single orphan revision"""
    unfiltered = repo.unfiltered()
    obsoleted = unfiltered.revs('obsolete()')

    src = revset.getset(repo, subset, x).first()
    if not src or src in obsoleted:
        # Empty src or already obsoleted - Do not return a destination
        return smartset.baseset()

    candidates = destutil.orphanpossibledestination(repo, src)
    if len(candidates) > 1:
        raise error.Abort(
            _("ambiguous automatic rebase: %r could end up on any of %r") % (
                src, candidates))
    # We have zero or one destination, so we can just return here.
    return smartset.baseset(candidates)
127 127
def _ctxdesc(ctx):
    """short description for a context"""
    repo = ctx.repo()
    desc = '%d:%s "%s"' % (ctx.rev(), ctx,
                           ctx.description().split('\n', 1)[0])
    labels = []
    for nsname, ns in repo.names.iteritems():
        # branch names are already part of the description prefix
        if nsname == 'branches':
            continue
        labels.extend(ns.names(repo, ctx.node()))
    if labels:
        desc += ' (%s)' % ' '.join(labels)
    return desc
141 141
class rebaseruntime(object):
    """This class is a container for rebase runtime state"""
    def __init__(self, repo, ui, inmemory=False, opts=None):
        """Initialize runtime state for one rebase operation.

        repo: the repository being rebased
        ui: ui object used for config access and output
        inmemory: if True, rebase in memory instead of the working copy
        opts: dict of rebase command options (may be None)
        """
        if opts is None:
            opts = {}

        # prepared: whether we have rebasestate prepared or not. Currently it
        # decides whether "self.repo" is unfiltered or not.
        # The rebasestate has explicit hash to hash instructions not depending
        # on visibility. If rebasestate exists (in-memory or on-disk), use
        # unfiltered repo to avoid visibility issues.
        # Before knowing rebasestate (i.e. when starting a new rebase (not
        # --continue or --abort)), the original repo should be used so
        # visibility-dependent revsets are correct.
        self.prepared = False
        self._repo = repo

        self.ui = ui
        self.opts = opts
        self.originalwd = None
        self.external = nullrev
        # Mapping between the old revision id and either what is the new rebased
        # revision or what needs to be done with the old revision. The state
        # dict will be what contains most of the rebase progress state.
        self.state = {}
        self.activebookmark = None
        self.destmap = {}
        self.skipped = set()

        self.collapsef = opts.get('collapse', False)
        self.collapsemsg = cmdutil.logmessage(ui, opts)
        self.date = opts.get('date', None)

        e = opts.get('extrafn') # internal, used by e.g. hgsubversion
        self.extrafns = [_savegraft]
        if e:
            self.extrafns = [e]

        self.keepf = opts.get('keep', False)
        self.keepbranchesf = opts.get('keepbranches', False)
        self.obsoletenotrebased = {}
        self.obsoletewithoutsuccessorindestination = set()
        self.inmemory = inmemory
        # persistence helper for the .hg/rebasestate file
        self.stateobj = statemod.cmdstate(repo, 'rebasestate')
186 186
187 187 @property
188 188 def repo(self):
189 189 if self.prepared:
190 190 return self._repo.unfiltered()
191 191 else:
192 192 return self._repo
193 193
194 194 def storestatus(self, tr=None):
195 195 """Store the current status to allow recovery"""
196 196 if tr:
197 197 tr.addfilegenerator('rebasestate', ('rebasestate',),
198 198 self._writestatus, location='plain')
199 199 else:
200 200 with self.repo.vfs("rebasestate", "w") as f:
201 201 self._writestatus(f)
202 202
    def _writestatus(self, f):
        """Serialize the rebase state to file object *f*.

        The format is line oriented and the field order is significant; it
        must match what _read() expects.
        """
        repo = self.repo
        # the state file stores raw hashes, so write against the unfiltered
        # repo (see the visibility note in __init__)
        assert repo.filtername is None
        f.write(repo[self.originalwd].hex() + '\n')
        # was "dest". we now write dest per src root below.
        f.write('\n')
        f.write(repo[self.external].hex() + '\n')
        f.write('%d\n' % int(self.collapsef))
        f.write('%d\n' % int(self.keepf))
        f.write('%d\n' % int(self.keepbranchesf))
        f.write('%s\n' % (self.activebookmark or ''))
        destmap = self.destmap
        for d, v in self.state.iteritems():
            oldrev = repo[d].hex()
            if v >= 0:
                # already rebased: record the new node's hash
                newrev = repo[v].hex()
            else:
                # not rebased yet: record the sentinel value (e.g. revtodo)
                newrev = "%d" % v
            destnode = repo[destmap[d]].hex()
            f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
        repo.ui.debug('rebase status stored\n')
224 224
    def restorestatus(self):
        """Restore a previously stored status"""
        if not self.stateobj.exists():
            # no rebase in progress; point the user at the right command
            cmdutil.wrongtooltocontinue(self.repo, _('rebase'))

        data = self._read()
        self.repo.ui.debug('rebase status resumed\n')

        # copy the parsed fields onto the runtime object
        self.originalwd = data['originalwd']
        self.destmap = data['destmap']
        self.state = data['state']
        self.skipped = data['skipped']
        self.collapsef = data['collapse']
        self.keepf = data['keep']
        self.keepbranchesf = data['keepbranches']
        self.external = data['external']
        self.activebookmark = data['activebookmark']
242 242
243 243 def _read(self):
244 244 self.prepared = True
245 245 repo = self.repo
246 246 assert repo.filtername is None
247 247 data = {'keepbranches': None, 'collapse': None, 'activebookmark': None,
248 248 'external': nullrev, 'keep': None, 'originalwd': None}
249 249 legacydest = None
250 250 state = {}
251 251 destmap = {}
252 252
253 253 if True:
254 254 f = repo.vfs("rebasestate")
255 255 for i, l in enumerate(f.read().splitlines()):
256 256 if i == 0:
257 257 data['originalwd'] = repo[l].rev()
258 258 elif i == 1:
259 259 # this line should be empty in newer version. but legacy
260 260 # clients may still use it
261 261 if l:
262 262 legacydest = repo[l].rev()
263 263 elif i == 2:
264 264 data['external'] = repo[l].rev()
265 265 elif i == 3:
266 266 data['collapse'] = bool(int(l))
267 267 elif i == 4:
268 268 data['keep'] = bool(int(l))
269 269 elif i == 5:
270 270 data['keepbranches'] = bool(int(l))
271 271 elif i == 6 and not (len(l) == 81 and ':' in l):
272 272 # line 6 is a recent addition, so for backwards
273 273 # compatibility check that the line doesn't look like the
274 274 # oldrev:newrev lines
275 275 data['activebookmark'] = l
276 276 else:
277 277 args = l.split(':')
278 278 oldrev = repo[args[0]].rev()
279 279 newrev = args[1]
280 280 if newrev in legacystates:
281 281 continue
282 282 if len(args) > 2:
283 283 destrev = repo[args[2]].rev()
284 284 else:
285 285 destrev = legacydest
286 286 destmap[oldrev] = destrev
287 287 if newrev == revtodostr:
288 288 state[oldrev] = revtodo
289 289 # Legacy compat special case
290 290 else:
291 291 state[oldrev] = repo[newrev].rev()
292 292
293 293 if data['keepbranches'] is None:
294 294 raise error.Abort(_('.hg/rebasestate is incomplete'))
295 295
296 296 data['destmap'] = destmap
297 297 data['state'] = state
298 298 skipped = set()
299 299 # recompute the set of skipped revs
300 300 if not data['collapse']:
301 301 seen = set(destmap.values())
302 302 for old, new in sorted(state.items()):
303 303 if new != revtodo and new in seen:
304 304 skipped.add(old)
305 305 seen.add(new)
306 306 data['skipped'] = skipped
307 307 repo.ui.debug('computed skipped revs: %s\n' %
308 308 (' '.join('%d' % r for r in sorted(skipped)) or ''))
309 309
310 310 return data
311 311
    def _handleskippingobsolete(self, obsoleterevs, destmap):
        """Compute structures necessary for skipping obsolete revisions

        obsoleterevs: iterable of all obsolete revisions in rebaseset
        destmap: {srcrev: destrev} destination revisions
        """
        self.obsoletenotrebased = {}
        # honor the experimental knob: when disabled, compute nothing and
        # rebase obsolete revisions like any others
        if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
            return
        obsoleteset = set(obsoleterevs)
        (self.obsoletenotrebased,
         self.obsoletewithoutsuccessorindestination,
         obsoleteextinctsuccessors) = _computeobsoletenotrebased(
             self.repo, obsoleteset, destmap)
        # everything we intend to skip, for the sanity check below
        skippedset = set(self.obsoletenotrebased)
        skippedset.update(self.obsoletewithoutsuccessorindestination)
        skippedset.update(obsoleteextinctsuccessors)
        _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
330 330
    def _prepareabortorcontinue(self, isabort, backup=True, suppwarns=False):
        """Restore saved state for --abort or --continue.

        isabort: True for --abort, False for --continue
        backup: forwarded to abort(); if False, strip without saving a
        backup bundle
        suppwarns: forwarded to abort() to suppress warning output

        Returns 0 after an abort (including the broken-state case); returns
        None when continuing; raises error.Abort when --continue cannot
        proceed on inconsistent state.
        """
        try:
            self.restorestatus()
            self.collapsemsg = restorecollapsemsg(self.repo, isabort)
        except error.RepoLookupError:
            # state references revisions that no longer exist
            if isabort:
                clearstatus(self.repo)
                clearcollapsemsg(self.repo)
                self.repo.ui.warn(_('rebase aborted (no revision is removed,'
                                    ' only broken state is cleared)\n'))
                return 0
            else:
                msg = _('cannot continue inconsistent rebase')
                hint = _('use "hg rebase --abort" to clear broken state')
                raise error.Abort(msg, hint=hint)
        if isabort:
            return abort(self.repo, self.originalwd, self.destmap, self.state,
                         activebookmark=self.activebookmark, backup=backup,
                         suppwarns=suppwarns)
350 350
    def _preparenewrebase(self, destmap):
        """Validate *destmap* and build the initial rebase state.

        destmap: {srcrev: destrev} mapping of revisions to rebase

        Returns 1 (via _nothingtorebase) when there is nothing to do;
        otherwise fills originalwd/destmap/state and marks the runtime as
        prepared. Raises error.Abort on invalid requests.
        """
        if not destmap:
            return _nothingtorebase()

        rebaseset = destmap.keys()
        allowunstable = obsolete.isenabled(self.repo, obsolete.allowunstableopt)
        # refuse to orphan descendants unless --keep or unstable commits
        # are allowed
        if (not (self.keepf or allowunstable)
            and self.repo.revs('first(children(%ld) - %ld)',
                               rebaseset, rebaseset)):
            raise error.Abort(
                _("can't remove original changesets with"
                  " unrebased descendants"),
                hint=_('use --keep to keep original changesets'))

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_('nothing to rebase\n'))
            return _nothingtorebase()

        for root in self.repo.set('roots(%ld)', rebaseset):
            if not self.keepf and not root.mutable():
                raise error.Abort(_("can't rebase public changeset %s")
                                  % root,
                                  hint=_("see 'hg help phases' for details"))

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            # collapsing only makes sense onto a single destination
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.Abort(
                    _('--collapse does not work with multiple destinations'))
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors([destrev],
                                                          inclusive=True)
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_('reopening closed branch head %s\n') % dest)

        self.prepared = True
395 395
    def _assignworkingcopy(self):
        """Assign self.wctx: an in-memory overlay context when running
        in-memory, otherwise the on-disk working copy context."""
        if self.inmemory:
            from mercurial.context import overlayworkingctx
            self.wctx = overlayworkingctx(self.repo)
            self.repo.ui.debug("rebasing in-memory\n")
        else:
            self.wctx = self.repo[None]
            self.repo.ui.debug("rebasing on disk\n")
        self.repo.ui.log("rebase", "", rebase_imm_used=self.inmemory)
405 405
    def _performrebase(self, tr):
        """Rebase every revision in self.state that is still marked todo.

        tr: optional single transaction covering the whole rebase; when
        None, state is persisted after each rebased node instead.
        """
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.Abort(_('cannot collapse multiple named '
                                        'branches'))

        # Calculate self.obsoletenotrebased
        obsrevs = _filterobsoleterevs(self.repo, self.state)
        self._handleskippingobsolete(obsrevs, self.destmap)

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        cands = [k for k, v in self.state.iteritems() if v == revtodo]
        p = repo.ui.makeprogress(_("rebasing"), unit=_('changesets'),
                                 total=len(cands))
        def progress(ctx):
            p.increment(item=("%d:%s" % (ctx.rev(), ctx)))
        allowdivergence = self.ui.configbool(
            'experimental', 'evolution.allowdivergence')
        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs('sort(%ld, -topo)', subset)
            if not allowdivergence:
                # drop descendants of revisions whose rebase would create
                # divergent successors
                sortedrevs -= self.repo.revs(
                    'descendants(%ld) and not %ld',
                    self.obsoletewithoutsuccessorindestination,
                    self.obsoletewithoutsuccessorindestination,
                )
            for rev in sortedrevs:
                self._rebasenode(tr, rev, allowdivergence, progress)
        p.complete()
        ui.note(_('rebase merging completed\n'))
458 458
    def _concludenode(self, rev, p1, p2, editor, commitmsg=None):
        '''Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        Return node of committed revision.'''
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()
        date = self.date
        if date is None:
            date = ctx.date()
        # record the source revision, then let registered extra functions
        # (graft/branch info) amend the commit extras
        extra = {'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
        # never let the rebased commit drop below draft phase
        destphase = max(ctx.phase(), phases.draft)
        overrides = {('phases', 'new-commit'): destphase}
        if keepbranch:
            # allow an otherwise-empty commit so the branch change survives
            overrides[('ui', 'allowemptycommit')] = True
        with repo.ui.configoverride(overrides, 'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(repo, p1, p2,
                                           wctx=self.wctx,
                                           extra=extra,
                                           commitmsg=commitmsg,
                                           editor=editor,
                                           user=ctx.user(),
                                           date=date)
                mergemod.mergestate.clean(repo)
            else:
                newnode = commitnode(repo, p1, p2,
                                     extra=extra,
                                     commitmsg=commitmsg,
                                     editor=editor,
                                     user=ctx.user(),
                                     date=date)

            if newnode is None:
                # If it ended up being a no-op commit, then the normal
                # merge state clean-up path doesn't happen, so do it
                # here. Fix issue5494
                mergemod.mergestate.clean(repo)
        return newnode
503 503
    def _rebasenode(self, tr, rev, allowdivergence, progressfn):
        """Rebase a single revision *rev*, or skip it with an explanatory
        note.

        tr: optional transaction (None means state is stored after each
        completed commit)
        allowdivergence: whether rebasing obsolete revisions that would
        create divergence is permitted
        progressfn: callback invoked with the context being rebased
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        dest = self.destmap[rev]
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            ui.status(_('already rebased %s\n') % desc)
        elif (not allowdivergence
              and rev in self.obsoletewithoutsuccessorindestination):
            msg = _('note: not rebasing %s and its descendants as '
                    'this would cause divergence\n') % desc
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsoletenotrebased:
            succ = self.obsoletenotrebased[rev]
            if succ is None:
                msg = _('note: not rebasing %s, it has no '
                        'successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = (_('note: not rebasing %s, already in '
                         'destination as %s\n') % (desc, succdesc))
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(adjustdest(repo, rev, self.destmap, self.state,
                                  self.skipped))
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_('rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(repo, rev, self.destmap,
                                         self.state, self.skipped,
                                         self.obsoletenotrebased)
            if len(repo[None].parents()) == 2:
                repo.ui.debug('resuming interrupted rebase\n')
            else:
                overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
                with ui.configoverride(overrides, 'rebase'):
                    stats = rebasenode(repo, rev, p1, base, self.collapsef,
                                       dest, wctx=self.wctx)
                    if stats.unresolvedcount > 0:
                        if self.inmemory:
                            raise error.InMemoryMergeConflictsError()
                        else:
                            raise error.InterventionRequired(
                                _('unresolved conflicts (see hg '
                                  'resolve, then hg rebase --continue)'))
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, 'rebase')
                editor = cmdutil.getcommiteditor(editform=editform,
                                                 **pycompat.strkwargs(opts))
                newnode = self._concludenode(rev, p1, p2, editor)
            else:
                # Skip commit if we are collapsing
                if self.inmemory:
                    self.wctx.setbase(repo[p1])
                else:
                    repo.setparents(repo[p1].node())
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug('rebased as %s\n' % short(newnode))
            else:
                if not self.collapsef:
                    ui.warn(_('note: rebase of %d:%s created no changes '
                              'to commit\n') % (rev, ctx))
                    self.skipped.add(rev)
                self.state[rev] = p1
                ui.debug('next revision set to %d\n' % p1)
        else:
            ui.status(_('already rebased %s as %s\n') %
                      (desc, repo[self.state[rev]]))
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()
587 587
    def _finishrebase(self, backup=True):
        """Finalize the rebase: collapse if requested, restore the working
        directory, clear rebased revisions and remove the state file.

        backup: if False, no backup will be stored when stripping rebased
        revisions
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter('rebase', opts)
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(repo, min(self.state), self.destmap,
                                          self.state, self.skipped,
                                          self.obsoletenotrebased)
            editopt = opts.get('edit')
            editform = 'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                # build a default message listing every collapsed revision
                commitmsg = 'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += '\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            newnode = self._concludenode(revtoreuse, p1, self.external,
                                         editor, commitmsg=commitmsg)

            if newnode is not None:
                # every collapsed source now maps to the single new node
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if 'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped,
                     **pycompat.strkwargs(opts))

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_("update back to initial working directory parent\n"))
            hg.updaterepo(repo, newwd, overwrite=False)

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(ui, repo, self.destmap, self.state, self.skipped,
                     collapsedas, self.keepf, fm=fm, backup=backup)

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_("rebase completed\n"))
        util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_("%d revisions have been skipped\n") % skippedlen)
        fm.end()

        # reactivate the bookmark that was active before the rebase, if the
        # working parent still points at it
        if (self.activebookmark and self.activebookmark in repo._bookmarks and
            repo['.'].node() == repo._bookmarks[self.activebookmark]):
            bookmarks.activate(repo, self.activebookmark)
650 654
651 655 @command('rebase',
652 656 [('s', 'source', '',
653 657 _('rebase the specified changeset and descendants'), _('REV')),
654 658 ('b', 'base', '',
655 659 _('rebase everything from branching point of specified changeset'),
656 660 _('REV')),
657 661 ('r', 'rev', [],
658 662 _('rebase these revisions'),
659 663 _('REV')),
660 664 ('d', 'dest', '',
661 665 _('rebase onto the specified changeset'), _('REV')),
662 666 ('', 'collapse', False, _('collapse the rebased changesets')),
663 667 ('m', 'message', '',
664 668 _('use text as collapse commit message'), _('TEXT')),
665 669 ('e', 'edit', False, _('invoke editor on commit messages')),
666 670 ('l', 'logfile', '',
667 671 _('read collapse commit message from file'), _('FILE')),
668 672 ('k', 'keep', False, _('keep original changesets')),
669 673 ('', 'keepbranches', False, _('keep original branch names')),
670 674 ('D', 'detach', False, _('(DEPRECATED)')),
671 675 ('i', 'interactive', False, _('(DEPRECATED)')),
672 676 ('t', 'tool', '', _('specify merge tool')),
673 677 ('c', 'continue', False, _('continue an interrupted rebase')),
674 678 ('a', 'abort', False, _('abort an interrupted rebase')),
675 679 ('', 'auto-orphans', '', _('automatically rebase orphan revisions '
676 680 'in the specified revset (EXPERIMENTAL)')),
677 681 ] + cmdutil.dryrunopts + cmdutil.formatteropts + cmdutil.confirmopts,
678 682 _('[-s REV | -b REV] [-d REV] [OPTION]'))
679 683 def rebase(ui, repo, **opts):
680 684 """move changeset (and descendants) to a different branch
681 685
682 686 Rebase uses repeated merging to graft changesets from one part of
683 687 history (the source) onto another (the destination). This can be
684 688 useful for linearizing *local* changes relative to a master
685 689 development tree.
686 690
687 691 Published commits cannot be rebased (see :hg:`help phases`).
688 692 To copy commits, see :hg:`help graft`.
689 693
690 694 If you don't specify a destination changeset (``-d/--dest``), rebase
691 695 will use the same logic as :hg:`merge` to pick a destination. if
692 696 the current branch contains exactly one other head, the other head
693 697 is merged with by default. Otherwise, an explicit revision with
694 698 which to merge with must be provided. (destination changeset is not
695 699 modified by rebasing, but new changesets are added as its
696 700 descendants.)
697 701
698 702 Here are the ways to select changesets:
699 703
700 704 1. Explicitly select them using ``--rev``.
701 705
702 706 2. Use ``--source`` to select a root changeset and include all of its
703 707 descendants.
704 708
705 709 3. Use ``--base`` to select a changeset; rebase will find ancestors
706 710 and their descendants which are not also ancestors of the destination.
707 711
708 712 4. If you do not specify any of ``--rev``, ``source``, or ``--base``,
709 713 rebase will use ``--base .`` as above.
710 714
711 715 If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
712 716 can be used in ``--dest``. Destination would be calculated per source
713 717 revision with ``SRC`` substituted by that single source revision and
714 718 ``ALLSRC`` substituted by all source revisions.
715 719
716 720 Rebase will destroy original changesets unless you use ``--keep``.
717 721 It will also move your bookmarks (even if you do).
718 722
719 723 Some changesets may be dropped if they do not contribute changes
720 724 (e.g. merges from the destination branch).
721 725
722 726 Unlike ``merge``, rebase will do nothing if you are at the branch tip of
723 727 a named branch with two heads. You will need to explicitly specify source
724 728 and/or destination.
725 729
726 730 If you need to use a tool to automate merge/conflict decisions, you
727 731 can specify one with ``--tool``, see :hg:`help merge-tools`.
728 732 As a caveat: the tool will not be used to mediate when a file was
729 733 deleted, there is no hook presently available for this.
730 734
731 735 If a rebase is interrupted to manually resolve a conflict, it can be
732 736 continued with --continue/-c or aborted with --abort/-a.
733 737
734 738 .. container:: verbose
735 739
736 740 Examples:
737 741
738 742 - move "local changes" (current commit back to branching point)
739 743 to the current branch tip after a pull::
740 744
741 745 hg rebase
742 746
743 747 - move a single changeset to the stable branch::
744 748
745 749 hg rebase -r 5f493448 -d stable
746 750
747 751 - splice a commit and all its descendants onto another part of history::
748 752
749 753 hg rebase --source c0c3 --dest 4cf9
750 754
751 755 - rebase everything on a branch marked by a bookmark onto the
752 756 default branch::
753 757
754 758 hg rebase --base myfeature --dest default
755 759
756 760 - collapse a sequence of changes into a single commit::
757 761
758 762 hg rebase --collapse -r 1520:1525 -d .
759 763
760 764 - move a named branch while preserving its name::
761 765
762 766 hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
763 767
764 768 - stabilize orphaned changesets so history looks linear::
765 769
766 770 hg rebase -r 'orphan()-obsolete()'\
767 771 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
768 772 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
769 773
770 774 Configuration Options:
771 775
772 776 You can make rebase require a destination if you set the following config
773 777 option::
774 778
775 779 [commands]
776 780 rebase.requiredest = True
777 781
778 782 By default, rebase will close the transaction after each commit. For
779 783 performance purposes, you can configure rebase to use a single transaction
780 784 across the entire rebase. WARNING: This setting introduces a significant
781 785 risk of losing the work you've done in a rebase if the rebase aborts
782 786 unexpectedly::
783 787
784 788 [rebase]
785 789 singletransaction = True
786 790
787 791 By default, rebase writes to the working copy, but you can configure it to
    run in-memory for better performance, and to allow it to run if the
789 793 working copy is dirty::
790 794
791 795 [rebase]
792 796 experimental.inmemory = True
793 797
794 798 Return Values:
795 799
796 800 Returns 0 on success, 1 if nothing to rebase or there are
797 801 unresolved conflicts.
798 802
799 803 """
800 804 opts = pycompat.byteskwargs(opts)
801 805 inmemory = ui.configbool('rebase', 'experimental.inmemory')
802 806 dryrun = opts.get('dry_run')
803 807 if dryrun:
804 808 if opts.get('abort'):
805 809 raise error.Abort(_('cannot specify both --dry-run and --abort'))
806 810 if opts.get('continue'):
807 811 raise error.Abort(_('cannot specify both --dry-run and --continue'))
808 812 if opts.get('confirm'):
809 813 dryrun = True
810 814 if opts.get('dry_run'):
811 815 raise error.Abort(_('cannot specify both --confirm and --dry-run'))
812 816 if opts.get('abort'):
813 817 raise error.Abort(_('cannot specify both --confirm and --abort'))
814 818 if opts.get('continue'):
815 819 raise error.Abort(_('cannot specify both --confirm and --continue'))
816 820
817 821 if (opts.get('continue') or opts.get('abort') or
818 822 repo.currenttransaction() is not None):
819 823 # in-memory rebase is not compatible with resuming rebases.
820 824 # (Or if it is run within a transaction, since the restart logic can
821 825 # fail the entire transaction.)
822 826 inmemory = False
823 827
824 828 if opts.get('auto_orphans'):
825 829 for key in opts:
826 830 if key != 'auto_orphans' and opts.get(key):
827 831 raise error.Abort(_('--auto-orphans is incompatible with %s') %
828 832 ('--' + key))
829 833 userrevs = list(repo.revs(opts.get('auto_orphans')))
830 834 opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)]
831 835 opts['dest'] = '_destautoorphanrebase(SRC)'
836 backup = ui.configbool('ui', 'history-editing-backup')
837 opts['backup'] = backup
832 838
833 839 if dryrun:
834 840 return _dryrunrebase(ui, repo, opts)
835 841 elif inmemory:
836 842 try:
837 843 # in-memory merge doesn't support conflicts, so if we hit any, abort
838 844 # and re-run as an on-disk merge.
839 845 overrides = {('rebase', 'singletransaction'): True}
840 846 with ui.configoverride(overrides, 'rebase'):
841 847 return _dorebase(ui, repo, opts, inmemory=inmemory)
842 848 except error.InMemoryMergeConflictsError:
843 849 ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
844 850 ' merge\n'))
845 851 _dorebase(ui, repo, {'abort': True})
846 852 return _dorebase(ui, repo, opts, inmemory=False)
847 853 else:
848 854 return _dorebase(ui, repo, opts)
849 855
def _dryrunrebase(ui, repo, opts):
    """Run the rebase in-memory without touching the on-disk repository.

    Used for both ``--dry-run`` and ``--confirm``: the rebase is performed
    with an in-memory runtime and left unfinished; with ``--confirm`` the
    user is then prompted to apply or discard it, otherwise it is always
    rolled back.  Returns 0 on success, 1 if a merge conflict was hit.
    """
    rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts)
    confirm = opts.get('confirm')
    # 'backup' was filled in from ui.history-editing-backup by the caller
    backup = opts.get('backup')
    if confirm:
        ui.status(_('starting in-memory rebase\n'))
    else:
        ui.status(_('starting dry-run rebase; repository will not be '
                    'changed\n'))
    with repo.wlock(), repo.lock():
        # assume we must roll back unless the confirm path says otherwise
        needsabort = True
        try:
            overrides = {('rebase', 'singletransaction'): True}
            with ui.configoverride(overrides, 'rebase'):
                _origrebase(ui, repo, opts, rbsrt, inmemory=True,
                            leaveunfinished=True)
        except error.InMemoryMergeConflictsError:
            ui.status(_('hit a merge conflict\n'))
            return 1
        else:
            if confirm:
                ui.status(_('rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?'
                                         b'$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase(backup=backup)
                else:
                    rbsrt._prepareabortorcontinue(isabort=True, backup=False,
                                                  suppwarns=True)
                # either way the rebase state was consumed above
                needsabort = False
            else:
                ui.status(_('dry-run rebase completed successfully; run without'
                            ' -n/--dry-run to perform this rebase\n'))
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(isabort=True, backup=False,
                                              suppwarns=True)
888 895
def _dorebase(ui, repo, opts, inmemory=False):
    """Entry point for a real (non-dry-run) rebase invocation."""
    return _origrebase(ui, repo, opts,
                       rebaseruntime(repo, ui, inmemory, opts),
                       inmemory=inmemory)
892 899
def _origrebase(ui, repo, opts, rbsrt, inmemory=False, leaveunfinished=False):
    """Core rebase logic shared by the normal and dry-run entry points.

    ``rbsrt`` is a pre-constructed rebaseruntime.  With ``leaveunfinished``
    the rebase state is left on disk instead of being finalized; the
    dry-run/confirm caller finishes or aborts it itself.
    """
    with repo.wlock(), repo.lock():
        # Validate input and define rebasing points
        destf = opts.get('dest', None)
        srcf = opts.get('source', None)
        basef = opts.get('base', None)
        revf = opts.get('rev', [])
        # search default destination in this space
        # used in the 'hg pull --rebase' case, see issue 5214.
        destspace = opts.get('_destspace')
        contf = opts.get('continue')
        abortf = opts.get('abort')
        # populated from ui.history-editing-backup by the rebase() command
        backup = opts.get('backup')
        if opts.get('interactive'):
            try:
                if extensions.find('histedit'):
                    enablehistedit = ''
            except KeyError:
                enablehistedit = " --config extensions.histedit="
            help = "hg%s help -e histedit" % enablehistedit
            msg = _("interactive history editing is supported by the "
                    "'histedit' extension (see \"%s\")") % help
            raise error.Abort(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.Abort(
                _('message can only be specified with collapse'))

        if contf or abortf:
            # --continue / --abort: resume or roll back a stored rebase state
            if contf and abortf:
                raise error.Abort(_('cannot use both abort and continue'))
            if rbsrt.collapsef:
                raise error.Abort(
                    _('cannot use collapse with continue or abort'))
            if srcf or basef or destf:
                raise error.Abort(
                    _('abort and continue do not allow specifying revisions'))
            if abortf and opts.get('tool', False):
                ui.warn(_('tool option will be ignored\n'))
            if contf:
                # refuse to continue while merge conflicts are unresolved
                ms = mergemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(abortf, backup=backup)
            if retcode is not None:
                return retcode
        else:
            destmap = _definedestmap(ui, repo, inmemory, destf, srcf, basef,
                                     revf, destspace=destspace)
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool('rebase', 'singletransaction')
        if singletr:
            tr = repo.transaction('rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not inmemory:
                dsguard = dirstateguard.dirstateguard(repo, 'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not leaveunfinished:
                    rbsrt._finishrebase(backup=backup)
965 973
def _definedestmap(ui, repo, inmemory, destf=None, srcf=None, basef=None,
                   revf=None, destspace=None):
    """use revisions argument to define destmap {srcrev: destrev}

    Exactly one of ``revf`` (--rev), ``srcf`` (--source) or ``basef``
    (--base, defaulting to '.') selects the rebase set.  Returns None when
    there is nothing to rebase (after printing an explanation).
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details
    if srcf and basef:
        raise error.Abort(_('cannot specify both a source and a base'))
    if revf and basef:
        raise error.Abort(_('cannot specify both a revision and a base'))
    if revf and srcf:
        raise error.Abort(_('cannot specify both a revision and a source'))

    if not inmemory:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if ui.configbool('commands', 'rebase.requiredest') and not destf:
        raise error.Abort(_('you must specify a destination'),
                          hint=_('use: hg rebase -d REV'))

    dest = None

    if revf:
        rebaseset = scmutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_('empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = scmutil.revrange(repo, [srcf])
        if not src:
            ui.status(_('empty "source" revision set - nothing to rebase\n'))
            return None
        # --source means the source revisions plus all their descendants
        rebaseset = repo.revs('(%ld)::', src)
        assert rebaseset
    else:
        base = scmutil.revrange(repo, [basef or '.'])
        if not base:
            ui.status(_('empty "base" revision set - '
                        "can't compute rebase set\n"))
            return None
        if destf:
            # --base does not support multiple destinations
            dest = scmutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = [] # selected children of branching points
        bpbase = {} # {branchingpoint: [origbase]}
        for b in base: # group bases by branching points
            bp = repo.revs('ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in bpbase.iteritems(): # calculate roots
            roots += list(repo.revs('children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs('%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(_('nothing to rebase - %s is both "base"'
                                ' and destination\n') % dest)
                else:
                    ui.status(_('nothing to rebase - working directory '
                                'parent is also destination\n'))
            elif not repo.revs('%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(_('nothing to rebase - "base" %s is '
                                'already an ancestor of destination '
                                '%s\n') %
                              ('+'.join(bytes(repo[r]) for r in base),
                               dest))
                else:
                    ui.status(_('nothing to rebase - working '
                                'directory parent is already an '
                                'ancestor of destination %s\n') % dest)
            else: # can it happen?
                ui.status(_('nothing to rebase from %s to %s\n') %
                          ('+'.join(bytes(repo[r]) for r in base), dest))
            return None

    rebasingwcp = repo['.'].rev() in rebaseset
    ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp)
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec('%ld', rebaseset)
    alias = {'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias['SRC'] = revsetlang.formatspec('%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_('skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.Abort(_('rebase destination for %s is not '
                                        'unique') % repo[r])

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset} # {srcrev: destrev}

    if not destmap:
        ui.status(_('nothing to rebase - empty destination\n'))
        return None

    return destmap
1102 1110
def externalparent(repo, state, destancestors):
    """Return the revision that should be used as the second parent
    when the revisions in state is collapsed on top of destancestors.
    Abort if there is more than one parent.
    """
    lowest = min(state)
    # parents of rebased revisions that live outside both the rebase set
    # and the destination's ancestors
    external = {p.rev()
                for rev in state if rev != lowest
                for p in repo[rev].parents()
                if p.rev() not in state and p.rev() not in destancestors}
    if not external:
        return nullrev
    if len(external) == 1:
        return external.pop()
    raise error.Abort(_('unable to collapse on top of %d, there is more '
                        'than one external parent: %s') %
                      (max(destancestors),
                       ', '.join("%d" % p for p in sorted(external))))
1125 1133
def commitmemorynode(repo, p1, p2, wctx, editor, extra, user, date, commitmsg):
    '''Commit the memory changes with parents p1 and p2.
    Return node of committed revision.'''
    # Mirror the empty-commit check performed by ``repo.commit``.
    if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
        return None

    # ``extra['branch']`` (set by extrafn) takes precedence over p1's
    # branch (the latter is used when passing ``--keepbranches``).
    branch = repo[p1].branch()
    branch = extra.get('branch', branch)

    memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date,
                           extra=extra, user=user, branch=branch, editor=editor)
    node = repo.commitctx(memctx)
    wctx.clean()  # the working context might be reused
    return node
1144 1152
def commitnode(repo, p1, p2, editor, extra, user, date, commitmsg):
    '''Commit the wd changes with parents p1 and p2.
    Return node of committed revision.'''
    if repo.ui.configbool('rebase', 'singletransaction'):
        # the surrounding single transaction already protects the dirstate
        guard = util.nullcontextmanager()
    else:
        guard = dirstateguard.dirstateguard(repo, 'rebase')
    with guard:
        repo.setparents(repo[p1].node(), repo[p2].node())

        # Commit might fail if unresolved files exist
        newnode = repo.commit(text=commitmsg, user=user, date=date,
                              extra=extra, editor=editor)

        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode
1160 1168
def rebasenode(repo, rev, p1, base, collapse, dest, wctx):
    '''Rebase a single revision rev on top of p1 using base as merge ancestor

    Returns the stats object from the underlying mergemod.update() call.
    '''
    # Merge phase
    # Update to destination and merge it with local
    if wctx.isinmemory():
        wctx.setbase(repo[p1])
    else:
        if repo['.'].rev() != p1:
            repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
            mergemod.update(repo, p1, False, True)
        else:
            repo.ui.debug(" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
    if base is not None:
        repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
    # When collapsing in-place, the parent is the common ancestor, we
    # have to allow merging with it.
    stats = mergemod.update(repo, rev, True, True, base, collapse,
                            labels=['dest', 'source'], wc=wctx)
    if collapse:
        copies.duplicatecopies(repo, wctx, rev, dest)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent, but *not*
        # duplicate any copies that have already been
        # performed in the destination.
        p1rev = repo[rev].p1().rev()
        copies.duplicatecopies(repo, wctx, rev, p1rev, skiprev=dest)
    return stats
1195 1203
def adjustdest(repo, rev, destmap, state, skipped):
    """adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.

        A

      C'       After rebasing C, when considering B's destination, use C'
      |        instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    # state[s] > 0 appears to mean s has already been rebased (to state[s])
    # -- consistent with its use in defineparents
    source = [s for s, d in state.items()
              if d > 0 and destmap[s] == dest and s not in skipped]

    result = []
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            candidate = repo.revs('max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    'rev %d should be rebased already at this time' % dest)
        result.append(adjusted)
    return result
1268 1276
def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """
    Abort if rebase will create divergence or rebase is noop because of markers

    `rebaseobsrevs`: set of obsolete revision in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    # An obsolete node whose successors are not in dest leads to divergence
    allowdivergence = ui.configbool('experimental',
                                    'evolution.allowdivergence')
    candidates = rebaseobsrevs - rebaseobsskipped
    if candidates and not allowdivergence:
        joined = ",".join(bytes(repo[r]) for r in candidates)
        msg = _("this rebase will cause "
                "divergences from: %s")
        h = _("to force the rebase please set "
              "experimental.evolution.allowdivergence=True")
        raise error.Abort(msg % (joined,), hint=h)
1290 1298
def successorrevs(unfi, rev):
    """yield revision numbers for successors of rev"""
    # kept as a generator so the assert/lookup run lazily, like before
    assert unfi.filtername is None
    nodemap = unfi.changelog.nodemap
    startnode = unfi[rev].node()
    for succ in obsutil.allsuccessors(unfi.obsstore, [startnode]):
        if succ in nodemap:
            yield nodemap[succ]
1298 1306
def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    Returns a 3-tuple ``(newp1, newp2, base)`` where ``base`` may be None.

    The destination specified by "dest" cannot always be used directly because
    previously rebase result could affect destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge is slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev) # old parents
    newps = [nullrev, nullrev] # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps) # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendent of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        #   1. decide which parents will not be moved towards dest
        #   2. if the above decision is "no", should a parent still be moved
        #      because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries to be not rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # "bases" only record "special" merge bases that cannot be
            # calculated from changelog DAG (i.e. isancestor(p, np) is False).
            # For example:
            #
            #      B'   # rebase -s B -d D, when B was rebased to B'. dest for C
            #      | C  # is B', but merge base for C is B, instead of
            #      D |  # changelog.ancestor(C, B') == A. If changelog DAG and
            #      | B  # "state" edges are merged (so there will be an edge from
            #      |/   #  B to B'), the merge base is still ancestor(C, B') in
            #      A    # the merged graph.
            #
            # Also see https://bz.mercurial-scm.org/show_bug.cgi?id=1950#c8
            # which uses "virtual null merge" to explain this situation.
            if isancestor(p, np):
                bases[i] = nullrev

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x): # CASE-1
                    np = nullrev
                elif isancestor(x, np): # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendent of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.Abort(_('cannot rebase %d:%s without '
                                'moving at least one of its parents')
                              % (rev, repo[rev]))

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.Abort(_('source is ancestor of destination'))

    # "rebasenode" updates to new p1, use the corresponding merge base.
    if bases[0] != nullrev:
        base = bases[0]
    else:
        base = None

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   # chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in bases if b != nullrev) > 1:
        unwanted = [None, None] # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            siderevs = list(repo.revs('((%ld-%d) %% (%d+%d))',
                                      bases, base, base, dest))

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [r for r, d in state.items()
                             if d > 0 and r not in obsskipped]
                merges = [r for r in rebaseset
                          if cl.parentrevs(r)[1] != nullrev]
                unwanted[i] = list(repo.revs('%ld - (::%ld) - %ld',
                                             siderevs, merges, rebaseset))

        # Choose a merge base that has a minimal number of unwanted revs.
        l, i = min((len(revs), i)
                   for i, revs in enumerate(unwanted) if revs is not None)
        base = bases[i]

        # newps[0] should match merge base if possible. Currently, if newps[i]
        # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
        # the other's ancestor. In that case, it's fine to not swap newps here.
        # (see CASE-1 and CASE-2 above)
        if i != 0 and newps[i] != nullrev:
            newps[0], newps[i] = newps[i], newps[0]

        # The merge will include unwanted revisions. Abort now. Revisit this if
        # we have a more advanced merge algorithm that handles multiple bases.
        if l > 0:
            unwanteddesc = _(' or ').join(
                (', '.join('%d:%s' % (r, repo[r]) for r in revs)
                 for revs in unwanted if revs is not None))
            raise error.Abort(
                _('rebasing %d:%s will include unwanted changes from %s')
                % (rev, repo[rev], unwanteddesc))

    repo.ui.debug(" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
1481 1489
def isagitpatch(repo, patchname):
    'Return true if the given patch is in git format'
    mqpatch = os.path.join(repo.mq.path, patchname)
    # a git-format patch contains at least one "diff --git" header line
    return any(line.startswith('diff --git')
               for line in patch.linereader(open(mqpatch, 'rb')))
1489 1497
def updatemq(repo, state, skipped, **opts):
    '''Update rebased mq patches - finalize and then import them

    ``state`` maps old revs to their rebased revs; ``skipped`` holds revs
    dropped during the rebase.  Applied patches whose revs were rebased are
    finalized and re-imported at their new revision; dropped or unexpected
    ones are removed from the series (guards preserved).
    '''
    mqrebase = {}  # {oldrev: (patchname, isgit)}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
                          (rev, p.name))
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(_('updating mq patch %s to %d:%s\n') %
                             (name, state[rev], repo[state[rev]]))
                mq.qimport(repo, (), patchname=name, git=isgit,
                           rev=["%d" % state[rev]])
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [s for s in original_series
                     if mq.guard_re.split(s, 1)[0] not in skippedpatches]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()
1530 1538
1531 1539 def storecollapsemsg(repo, collapsemsg):
1532 1540 'Store the collapse message to allow recovery'
1533 1541 collapsemsg = collapsemsg or ''
1534 1542 f = repo.vfs("last-message.txt", "w")
1535 1543 f.write("%s\n" % collapsemsg)
1536 1544 f.close()
1537 1545
1538 1546 def clearcollapsemsg(repo):
1539 1547 'Remove collapse message file'
1540 1548 repo.vfs.unlinkpath("last-message.txt", ignoremissing=True)
1541 1549
1542 1550 def restorecollapsemsg(repo, isabort):
1543 1551 'Restore previously stored collapse message'
1544 1552 try:
1545 1553 f = repo.vfs("last-message.txt")
1546 1554 collapsemsg = f.readline().strip()
1547 1555 f.close()
1548 1556 except IOError as err:
1549 1557 if err.errno != errno.ENOENT:
1550 1558 raise
1551 1559 if isabort:
1552 1560 # Oh well, just abort like normal
1553 1561 collapsemsg = ''
1554 1562 else:
1555 1563 raise error.Abort(_('missing .hg/last-message.txt for rebase'))
1556 1564 return collapsemsg
1557 1565
1558 1566 def clearstatus(repo):
1559 1567 'Remove the status files'
1560 1568 # Make sure the active transaction won't write the state file
1561 1569 tr = repo.currenttransaction()
1562 1570 if tr:
1563 1571 tr.removefilegenerator('rebasestate')
1564 1572 repo.vfs.unlinkpath("rebasestate", ignoremissing=True)
1565 1573
1566 1574 def needupdate(repo, state):
1567 1575 '''check whether we should `update --clean` away from a merge, or if
1568 1576 somehow the working dir got forcibly updated, e.g. by older hg'''
1569 1577 parents = [p.rev() for p in repo[None].parents()]
1570 1578
1571 1579 # Are we in a merge state at all?
1572 1580 if len(parents) < 2:
1573 1581 return False
1574 1582
1575 1583 # We should be standing on the first as-of-yet unrebased commit.
1576 1584 firstunrebased = min([old for old, new in state.iteritems()
1577 1585 if new == nullrev])
1578 1586 if firstunrebased in parents:
1579 1587 return True
1580 1588
1581 1589 return False
1582 1590
1583 1591 def abort(repo, originalwd, destmap, state, activebookmark=None, backup=True,
1584 1592 suppwarns=False):
1585 1593 '''Restore the repository to its original state. Additional args:
1586 1594
1587 1595 activebookmark: the name of the bookmark that should be active after the
1588 1596 restore'''
1589 1597
1590 1598 try:
1591 1599 # If the first commits in the rebased set get skipped during the rebase,
1592 1600 # their values within the state mapping will be the dest rev id. The
1593 1601 # rebased list must must not contain the dest rev (issue4896)
1594 1602 rebased = [s for r, s in state.items()
1595 1603 if s >= 0 and s != r and s != destmap[r]]
1596 1604 immutable = [d for d in rebased if not repo[d].mutable()]
1597 1605 cleanup = True
1598 1606 if immutable:
1599 1607 repo.ui.warn(_("warning: can't clean up public changesets %s\n")
1600 1608 % ', '.join(bytes(repo[r]) for r in immutable),
1601 1609 hint=_("see 'hg help phases' for details"))
1602 1610 cleanup = False
1603 1611
1604 1612 descendants = set()
1605 1613 if rebased:
1606 1614 descendants = set(repo.changelog.descendants(rebased))
1607 1615 if descendants - set(rebased):
1608 1616 repo.ui.warn(_("warning: new changesets detected on destination "
1609 1617 "branch, can't strip\n"))
1610 1618 cleanup = False
1611 1619
1612 1620 if cleanup:
1613 1621 shouldupdate = False
1614 1622 if rebased:
1615 1623 strippoints = [
1616 1624 c.node() for c in repo.set('roots(%ld)', rebased)]
1617 1625
1618 1626 updateifonnodes = set(rebased)
1619 1627 updateifonnodes.update(destmap.values())
1620 1628 updateifonnodes.add(originalwd)
1621 1629 shouldupdate = repo['.'].rev() in updateifonnodes
1622 1630
1623 1631 # Update away from the rebase if necessary
1624 1632 if shouldupdate or needupdate(repo, state):
1625 1633 mergemod.update(repo, originalwd, False, True)
1626 1634
1627 1635 # Strip from the first rebased revision
1628 1636 if rebased:
1629 1637 repair.strip(repo.ui, repo, strippoints, backup=backup)
1630 1638
1631 1639 if activebookmark and activebookmark in repo._bookmarks:
1632 1640 bookmarks.activate(repo, activebookmark)
1633 1641
1634 1642 finally:
1635 1643 clearstatus(repo)
1636 1644 clearcollapsemsg(repo)
1637 1645 if not suppwarns:
1638 1646 repo.ui.warn(_('rebase aborted\n'))
1639 1647 return 0
1640 1648
1641 1649 def sortsource(destmap):
1642 1650 """yield source revisions in an order that we only rebase things once
1643 1651
1644 1652 If source and destination overlaps, we should filter out revisions
1645 1653 depending on other revisions which hasn't been rebased yet.
1646 1654
1647 1655 Yield a sorted list of revisions each time.
1648 1656
1649 1657 For example, when rebasing A to B, B to C. This function yields [B], then
1650 1658 [A], indicating B needs to be rebased first.
1651 1659
1652 1660 Raise if there is a cycle so the rebase is impossible.
1653 1661 """
1654 1662 srcset = set(destmap)
1655 1663 while srcset:
1656 1664 srclist = sorted(srcset)
1657 1665 result = []
1658 1666 for r in srclist:
1659 1667 if destmap[r] not in srcset:
1660 1668 result.append(r)
1661 1669 if not result:
1662 1670 raise error.Abort(_('source and destination form a cycle'))
1663 1671 srcset -= set(result)
1664 1672 yield result
1665 1673
1666 1674 def buildstate(repo, destmap, collapse):
1667 1675 '''Define which revisions are going to be rebased and where
1668 1676
1669 1677 repo: repo
1670 1678 destmap: {srcrev: destrev}
1671 1679 '''
1672 1680 rebaseset = destmap.keys()
1673 1681 originalwd = repo['.'].rev()
1674 1682
1675 1683 # This check isn't strictly necessary, since mq detects commits over an
1676 1684 # applied patch. But it prevents messing up the working directory when
1677 1685 # a partially completed rebase is blocked by mq.
1678 1686 if 'qtip' in repo.tags():
1679 1687 mqapplied = set(repo[s.node].rev() for s in repo.mq.applied)
1680 1688 if set(destmap.values()) & mqapplied:
1681 1689 raise error.Abort(_('cannot rebase onto an applied mq patch'))
1682 1690
1683 1691 # Get "cycle" error early by exhausting the generator.
1684 1692 sortedsrc = list(sortsource(destmap)) # a list of sorted revs
1685 1693 if not sortedsrc:
1686 1694 raise error.Abort(_('no matching revisions'))
1687 1695
1688 1696 # Only check the first batch of revisions to rebase not depending on other
1689 1697 # rebaseset. This means "source is ancestor of destination" for the second
1690 1698 # (and following) batches of revisions are not checked here. We rely on
1691 1699 # "defineparents" to do that check.
1692 1700 roots = list(repo.set('roots(%ld)', sortedsrc[0]))
1693 1701 if not roots:
1694 1702 raise error.Abort(_('no matching revisions'))
1695 1703 def revof(r):
1696 1704 return r.rev()
1697 1705 roots = sorted(roots, key=revof)
1698 1706 state = dict.fromkeys(rebaseset, revtodo)
1699 1707 emptyrebase = (len(sortedsrc) == 1)
1700 1708 for root in roots:
1701 1709 dest = repo[destmap[root.rev()]]
1702 1710 commonbase = root.ancestor(dest)
1703 1711 if commonbase == root:
1704 1712 raise error.Abort(_('source is ancestor of destination'))
1705 1713 if commonbase == dest:
1706 1714 wctx = repo[None]
1707 1715 if dest == wctx.p1():
1708 1716 # when rebasing to '.', it will use the current wd branch name
1709 1717 samebranch = root.branch() == wctx.branch()
1710 1718 else:
1711 1719 samebranch = root.branch() == dest.branch()
1712 1720 if not collapse and samebranch and dest in root.parents():
1713 1721 # mark the revision as done by setting its new revision
1714 1722 # equal to its old (current) revisions
1715 1723 state[root.rev()] = root.rev()
1716 1724 repo.ui.debug('source is a child of destination\n')
1717 1725 continue
1718 1726
1719 1727 emptyrebase = False
1720 1728 repo.ui.debug('rebase onto %s starting from %s\n' % (dest, root))
1721 1729 if emptyrebase:
1722 1730 return None
1723 1731 for rev in sorted(state):
1724 1732 parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
1725 1733 # if all parents of this revision are done, then so is this revision
1726 1734 if parents and all((state.get(p) == p for p in parents)):
1727 1735 state[rev] = rev
1728 1736 return originalwd, destmap, state
1729 1737
1730 1738 def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None,
1731 keepf=False, fm=None):
1739 keepf=False, fm=None, backup=True):
1732 1740 """dispose of rebased revision at the end of the rebase
1733 1741
1734 1742 If `collapsedas` is not None, the rebase was a collapse whose result if the
1735 1743 `collapsedas` node.
1736 1744
1737 1745 If `keepf` is not True, the rebase has --keep set and no nodes should be
1738 1746 removed (but bookmarks still need to be moved).
1747
1748 If `backup` is False, no backup will be stored when stripping rebased
1749 revisions.
1739 1750 """
1740 1751 tonode = repo.changelog.node
1741 1752 replacements = {}
1742 1753 moves = {}
1743 1754 for rev, newrev in sorted(state.items()):
1744 1755 if newrev >= 0 and newrev != rev:
1745 1756 oldnode = tonode(rev)
1746 1757 newnode = collapsedas or tonode(newrev)
1747 1758 moves[oldnode] = newnode
1748 1759 if not keepf:
1749 1760 if rev in skipped:
1750 1761 succs = ()
1751 1762 else:
1752 1763 succs = (newnode,)
1753 1764 replacements[oldnode] = succs
1754 scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
1765 scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup)
1755 1766 if fm:
1756 1767 hf = fm.hexfunc
1757 1768 fl = fm.formatlist
1758 1769 fd = fm.formatdict
1759 1770 nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
1760 1771 for oldn, newn in replacements.iteritems()},
1761 1772 key="oldnode", value="newnodes")
1762 1773 fm.data(nodechanges=nodechanges)
1763 1774
1764 1775 def pullrebase(orig, ui, repo, *args, **opts):
1765 1776 'Call rebase after pull if the latter has been invoked with --rebase'
1766 1777 ret = None
1767 1778 if opts.get(r'rebase'):
1768 1779 if ui.configbool('commands', 'rebase.requiredest'):
1769 1780 msg = _('rebase destination required by configuration')
1770 1781 hint = _('use hg pull followed by hg rebase -d DEST')
1771 1782 raise error.Abort(msg, hint=hint)
1772 1783
1773 1784 with repo.wlock(), repo.lock():
1774 1785 if opts.get(r'update'):
1775 1786 del opts[r'update']
1776 1787 ui.debug('--update and --rebase are not compatible, ignoring '
1777 1788 'the update flag\n')
1778 1789
1779 1790 cmdutil.checkunfinished(repo)
1780 1791 cmdutil.bailifchanged(repo, hint=_('cannot pull with rebase: '
1781 1792 'please commit or shelve your changes first'))
1782 1793
1783 1794 revsprepull = len(repo)
1784 1795 origpostincoming = commands.postincoming
1785 1796 def _dummy(*args, **kwargs):
1786 1797 pass
1787 1798 commands.postincoming = _dummy
1788 1799 try:
1789 1800 ret = orig(ui, repo, *args, **opts)
1790 1801 finally:
1791 1802 commands.postincoming = origpostincoming
1792 1803 revspostpull = len(repo)
1793 1804 if revspostpull > revsprepull:
1794 1805 # --rev option from pull conflict with rebase own --rev
1795 1806 # dropping it
1796 1807 if r'rev' in opts:
1797 1808 del opts[r'rev']
1798 1809 # positional argument from pull conflicts with rebase's own
1799 1810 # --source.
1800 1811 if r'source' in opts:
1801 1812 del opts[r'source']
1802 1813 # revsprepull is the len of the repo, not revnum of tip.
1803 1814 destspace = list(repo.changelog.revs(start=revsprepull))
1804 1815 opts[r'_destspace'] = destspace
1805 1816 try:
1806 1817 rebase(ui, repo, **opts)
1807 1818 except error.NoMergeDestAbort:
1808 1819 # we can maybe update instead
1809 1820 rev, _a, _b = destutil.destupdate(repo)
1810 1821 if rev == repo['.'].rev():
1811 1822 ui.status(_('nothing to rebase\n'))
1812 1823 else:
1813 1824 ui.status(_('nothing to rebase - updating instead\n'))
1814 1825 # not passing argument to get the bare update behavior
1815 1826 # with warning and trumpets
1816 1827 commands.update(ui, repo)
1817 1828 else:
1818 1829 if opts.get(r'tool'):
1819 1830 raise error.Abort(_('--tool can only be used with --rebase'))
1820 1831 ret = orig(ui, repo, *args, **opts)
1821 1832
1822 1833 return ret
1823 1834
1824 1835 def _filterobsoleterevs(repo, revs):
1825 1836 """returns a set of the obsolete revisions in revs"""
1826 1837 return set(r for r in revs if repo[r].obsolete())
1827 1838
1828 1839 def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
1829 1840 """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
1830 1841
1831 1842 `obsoletenotrebased` is a mapping mapping obsolete => successor for all
1832 1843 obsolete nodes to be rebased given in `rebaseobsrevs`.
1833 1844
1834 1845 `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
1835 1846 without a successor in destination.
1836 1847
1837 1848 `obsoleteextinctsuccessors` is a set of obsolete revisions with only
1838 1849 obsolete successors.
1839 1850 """
1840 1851 obsoletenotrebased = {}
1841 1852 obsoletewithoutsuccessorindestination = set([])
1842 1853 obsoleteextinctsuccessors = set([])
1843 1854
1844 1855 assert repo.filtername is None
1845 1856 cl = repo.changelog
1846 1857 nodemap = cl.nodemap
1847 1858 extinctrevs = set(repo.revs('extinct()'))
1848 1859 for srcrev in rebaseobsrevs:
1849 1860 srcnode = cl.node(srcrev)
1850 1861 # XXX: more advanced APIs are required to handle split correctly
1851 1862 successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
1852 1863 # obsutil.allsuccessors includes node itself
1853 1864 successors.remove(srcnode)
1854 1865 succrevs = {nodemap[s] for s in successors if s in nodemap}
1855 1866 if succrevs.issubset(extinctrevs):
1856 1867 # all successors are extinct
1857 1868 obsoleteextinctsuccessors.add(srcrev)
1858 1869 if not successors:
1859 1870 # no successor
1860 1871 obsoletenotrebased[srcrev] = None
1861 1872 else:
1862 1873 dstrev = destmap[srcrev]
1863 1874 for succrev in succrevs:
1864 1875 if cl.isancestorrev(succrev, dstrev):
1865 1876 obsoletenotrebased[srcrev] = succrev
1866 1877 break
1867 1878 else:
1868 1879 # If 'srcrev' has a successor in rebase set but none in
1869 1880 # destination (which would be catched above), we shall skip it
1870 1881 # and its descendants to avoid divergence.
1871 1882 if any(s in destmap for s in succrevs):
1872 1883 obsoletewithoutsuccessorindestination.add(srcrev)
1873 1884
1874 1885 return (
1875 1886 obsoletenotrebased,
1876 1887 obsoletewithoutsuccessorindestination,
1877 1888 obsoleteextinctsuccessors,
1878 1889 )
1879 1890
1880 1891 def summaryhook(ui, repo):
1881 1892 if not repo.vfs.exists('rebasestate'):
1882 1893 return
1883 1894 try:
1884 1895 rbsrt = rebaseruntime(repo, ui, {})
1885 1896 rbsrt.restorestatus()
1886 1897 state = rbsrt.state
1887 1898 except error.RepoLookupError:
1888 1899 # i18n: column positioning for "hg summary"
1889 1900 msg = _('rebase: (use "hg rebase --abort" to clear broken state)\n')
1890 1901 ui.write(msg)
1891 1902 return
1892 1903 numrebased = len([i for i in state.itervalues() if i >= 0])
1893 1904 # i18n: column positioning for "hg summary"
1894 1905 ui.write(_('rebase: %s, %s (rebase --continue)\n') %
1895 1906 (ui.label(_('%d rebased'), 'rebase.rebased') % numrebased,
1896 1907 ui.label(_('%d remaining'), 'rebase.remaining') %
1897 1908 (len(state) - numrebased)))
1898 1909
1899 1910 def uisetup(ui):
1900 1911 #Replace pull with a decorator to provide --rebase option
1901 1912 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
1902 1913 entry[1].append(('', 'rebase', None,
1903 1914 _("rebase working directory to branch head")))
1904 1915 entry[1].append(('t', 'tool', '',
1905 1916 _("specify merge tool for rebase")))
1906 1917 cmdutil.summaryhooks.add('rebase', summaryhook)
1907 1918 cmdutil.unfinishedstates.append(
1908 1919 ['rebasestate', False, False, _('rebase in progress'),
1909 1920 _("use 'hg rebase --continue' or 'hg rebase --abort'")])
1910 1921 cmdutil.afterresolvedstates.append(
1911 1922 ['rebasestate', _('hg rebase --continue')])
@@ -1,437 +1,437 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 hex,
17 17 short,
18 18 )
19 19 from . import (
20 20 bundle2,
21 21 changegroup,
22 22 discovery,
23 23 error,
24 24 exchange,
25 25 obsolete,
26 26 obsutil,
27 27 pycompat,
28 28 util,
29 29 )
30 30 from .utils import (
31 31 stringutil,
32 32 )
33 33
34 34 def backupbundle(repo, bases, heads, node, suffix, compress=True,
35 35 obsolescence=True):
36 36 """create a bundle with the specified revisions as a backup"""
37 37
38 38 backupdir = "strip-backup"
39 39 vfs = repo.vfs
40 40 if not vfs.isdir(backupdir):
41 41 vfs.mkdir(backupdir)
42 42
43 43 # Include a hash of all the nodes in the filename for uniqueness
44 44 allcommits = repo.set('%ln::%ln', bases, heads)
45 45 allhashes = sorted(c.hex() for c in allcommits)
46 46 totalhash = hashlib.sha1(''.join(allhashes)).digest()
47 47 name = "%s/%s-%s-%s.hg" % (backupdir, short(node),
48 48 hex(totalhash[:4]), suffix)
49 49
50 50 cgversion = changegroup.localversion(repo)
51 51 comp = None
52 52 if cgversion != '01':
53 53 bundletype = "HG20"
54 54 if compress:
55 55 comp = 'BZ'
56 56 elif compress:
57 57 bundletype = "HG10BZ"
58 58 else:
59 59 bundletype = "HG10UN"
60 60
61 61 outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
62 62 contentopts = {
63 63 'cg.version': cgversion,
64 64 'obsolescence': obsolescence,
65 65 'phases': True,
66 66 }
67 67 return bundle2.writenewbundle(repo.ui, repo, 'strip', name, bundletype,
68 68 outgoing, contentopts, vfs, compression=comp)
69 69
70 70 def _collectfiles(repo, striprev):
71 71 """find out the filelogs affected by the strip"""
72 72 files = set()
73 73
74 74 for x in pycompat.xrange(striprev, len(repo)):
75 75 files.update(repo[x].files())
76 76
77 77 return sorted(files)
78 78
79 79 def _collectrevlog(revlog, striprev):
80 80 _, brokenset = revlog.getstrippoint(striprev)
81 81 return [revlog.linkrev(r) for r in brokenset]
82 82
83 83 def _collectmanifest(repo, striprev):
84 84 return _collectrevlog(repo.manifestlog._revlog, striprev)
85 85
86 86 def _collectbrokencsets(repo, files, striprev):
87 87 """return the changesets which will be broken by the truncation"""
88 88 s = set()
89 89
90 90 s.update(_collectmanifest(repo, striprev))
91 91 for fname in files:
92 92 s.update(_collectrevlog(repo.file(fname), striprev))
93 93
94 94 return s
95 95
96 96 def strip(ui, repo, nodelist, backup=True, topic='backup'):
97 97 # This function requires the caller to lock the repo, but it operates
98 98 # within a transaction of its own, and thus requires there to be no current
99 99 # transaction when it is called.
100 100 if repo.currenttransaction() is not None:
101 101 raise error.ProgrammingError('cannot strip from inside a transaction')
102 102
103 103 # Simple way to maintain backwards compatibility for this
104 104 # argument.
105 105 if backup in ['none', 'strip']:
106 106 backup = False
107 107
108 108 repo = repo.unfiltered()
109 109 repo.destroying()
110 110
111 111 cl = repo.changelog
112 112 # TODO handle undo of merge sets
113 113 if isinstance(nodelist, str):
114 114 nodelist = [nodelist]
115 115 striplist = [cl.rev(node) for node in nodelist]
116 116 striprev = min(striplist)
117 117
118 118 files = _collectfiles(repo, striprev)
119 119 saverevs = _collectbrokencsets(repo, files, striprev)
120 120
121 121 # Some revisions with rev > striprev may not be descendants of striprev.
122 122 # We have to find these revisions and put them in a bundle, so that
123 123 # we can restore them after the truncations.
124 124 # To create the bundle we use repo.changegroupsubset which requires
125 125 # the list of heads and bases of the set of interesting revisions.
126 126 # (head = revision in the set that has no descendant in the set;
127 127 # base = revision in the set that has no ancestor in the set)
128 128 tostrip = set(striplist)
129 129 saveheads = set(saverevs)
130 130 for r in cl.revs(start=striprev + 1):
131 131 if any(p in tostrip for p in cl.parentrevs(r)):
132 132 tostrip.add(r)
133 133
134 134 if r not in tostrip:
135 135 saverevs.add(r)
136 136 saveheads.difference_update(cl.parentrevs(r))
137 137 saveheads.add(r)
138 138 saveheads = [cl.node(r) for r in saveheads]
139 139
140 140 # compute base nodes
141 141 if saverevs:
142 142 descendants = set(cl.descendants(saverevs))
143 143 saverevs.difference_update(descendants)
144 144 savebases = [cl.node(r) for r in saverevs]
145 145 stripbases = [cl.node(r) for r in tostrip]
146 146
147 147 stripobsidx = obsmarkers = ()
148 148 if repo.ui.configbool('devel', 'strip-obsmarkers'):
149 149 obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
150 150 if obsmarkers:
151 151 stripobsidx = [i for i, m in enumerate(repo.obsstore)
152 152 if m in obsmarkers]
153 153
154 154 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
155 155 # is much faster
156 156 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
157 157 if newbmtarget:
158 158 newbmtarget = repo[newbmtarget.first()].node()
159 159 else:
160 160 newbmtarget = '.'
161 161
162 162 bm = repo._bookmarks
163 163 updatebm = []
164 164 for m in bm:
165 165 rev = repo[bm[m]].rev()
166 166 if rev in tostrip:
167 167 updatebm.append(m)
168 168
169 169 # create a changegroup for all the branches we need to keep
170 170 backupfile = None
171 171 vfs = repo.vfs
172 172 node = nodelist[-1]
173 173 if backup:
174 174 backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
175 175 repo.ui.status(_("saved backup bundle to %s\n") %
176 176 vfs.join(backupfile))
177 177 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
178 178 vfs.join(backupfile))
179 179 tmpbundlefile = None
180 180 if saveheads:
181 181 # do not compress temporary bundle if we remove it from disk later
182 182 #
183 183 # We do not include obsolescence, it might re-introduce prune markers
184 184 # we are trying to strip. This is harmless since the stripped markers
185 185 # are already backed up and we did not touched the markers for the
186 186 # saved changesets.
187 187 tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
188 188 compress=False, obsolescence=False)
189 189
190 190 with ui.uninterruptable():
191 191 try:
192 192 with repo.transaction("strip") as tr:
193 193 offset = len(tr.entries)
194 194
195 195 tr.startgroup()
196 196 cl.strip(striprev, tr)
197 197 stripmanifest(repo, striprev, tr, files)
198 198
199 199 for fn in files:
200 200 repo.file(fn).strip(striprev, tr)
201 201 tr.endgroup()
202 202
203 203 for i in pycompat.xrange(offset, len(tr.entries)):
204 204 file, troffset, ignore = tr.entries[i]
205 205 with repo.svfs(file, 'a', checkambig=True) as fp:
206 206 fp.truncate(troffset)
207 207 if troffset == 0:
208 208 repo.store.markremoved(file)
209 209
210 210 deleteobsmarkers(repo.obsstore, stripobsidx)
211 211 del repo.obsstore
212 212 repo.invalidatevolatilesets()
213 213 repo._phasecache.filterunknown(repo)
214 214
215 215 if tmpbundlefile:
216 216 ui.note(_("adding branch\n"))
217 217 f = vfs.open(tmpbundlefile, "rb")
218 218 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
219 219 if not repo.ui.verbose:
220 220 # silence internal shuffling chatter
221 221 repo.ui.pushbuffer()
222 222 tmpbundleurl = 'bundle:' + vfs.join(tmpbundlefile)
223 223 txnname = 'strip'
224 224 if not isinstance(gen, bundle2.unbundle20):
225 225 txnname = "strip\n%s" % util.hidepassword(tmpbundleurl)
226 226 with repo.transaction(txnname) as tr:
227 227 bundle2.applybundle(repo, gen, tr, source='strip',
228 228 url=tmpbundleurl)
229 229 if not repo.ui.verbose:
230 230 repo.ui.popbuffer()
231 231 f.close()
232 232
233 233 with repo.transaction('repair') as tr:
234 234 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
235 235 bm.applychanges(repo, tr, bmchanges)
236 236
237 237 # remove undo files
238 238 for undovfs, undofile in repo.undofiles():
239 239 try:
240 240 undovfs.unlink(undofile)
241 241 except OSError as e:
242 242 if e.errno != errno.ENOENT:
243 243 ui.warn(_('error removing %s: %s\n') %
244 244 (undovfs.join(undofile),
245 245 stringutil.forcebytestr(e)))
246 246
247 247 except: # re-raises
248 248 if backupfile:
249 249 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
250 250 % vfs.join(backupfile))
251 251 if tmpbundlefile:
252 252 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
253 253 % vfs.join(tmpbundlefile))
254 254 ui.warn(_("(fix the problem, then recover the changesets with "
255 255 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
256 256 raise
257 257 else:
258 258 if tmpbundlefile:
259 259 # Remove temporary bundle only if there were no exceptions
260 260 vfs.unlink(tmpbundlefile)
261 261
262 262 repo.destroyed()
263 263 # return the backup file path (or None if 'backup' was False) so
264 264 # extensions can use it
265 265 return backupfile
266 266
267 267 def safestriproots(ui, repo, nodes):
268 268 """return list of roots of nodes where descendants are covered by nodes"""
269 269 torev = repo.unfiltered().changelog.rev
270 270 revs = set(torev(n) for n in nodes)
271 271 # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
272 272 # orphaned = affected - wanted
273 273 # affected = descendants(roots(wanted))
274 274 # wanted = revs
275 275 tostrip = set(repo.revs('%ld-(::((roots(%ld)::)-%ld))', revs, revs, revs))
276 276 notstrip = revs - tostrip
277 277 if notstrip:
278 278 nodestr = ', '.join(sorted(short(repo[n].node()) for n in notstrip))
279 279 ui.warn(_('warning: orphaned descendants detected, '
280 280 'not stripping %s\n') % nodestr)
281 281 return [c.node() for c in repo.set('roots(%ld)', tostrip)]
282 282
283 283 class stripcallback(object):
284 284 """used as a transaction postclose callback"""
285 285
286 286 def __init__(self, ui, repo, backup, topic):
287 287 self.ui = ui
288 288 self.repo = repo
289 289 self.backup = backup
290 290 self.topic = topic or 'backup'
291 291 self.nodelist = []
292 292
293 293 def addnodes(self, nodes):
294 294 self.nodelist.extend(nodes)
295 295
296 296 def __call__(self, tr):
297 297 roots = safestriproots(self.ui, self.repo, self.nodelist)
298 298 if roots:
299 299 strip(self.ui, self.repo, roots, self.backup, self.topic)
300 300
301 def delayedstrip(ui, repo, nodelist, topic=None):
301 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
302 302 """like strip, but works inside transaction and won't strip irreverent revs
303 303
304 304 nodelist must explicitly contain all descendants. Otherwise a warning will
305 305 be printed that some nodes are not stripped.
306 306
307 Always do a backup. The last non-None "topic" will be used as the backup
308 topic name. The default backup topic name is "backup".
307 Will do a backup if `backup` is True. The last non-None "topic" will be
308 used as the backup topic name. The default backup topic name is "backup".
309 309 """
310 310 tr = repo.currenttransaction()
311 311 if not tr:
312 312 nodes = safestriproots(ui, repo, nodelist)
313 return strip(ui, repo, nodes, True, topic)
313 return strip(ui, repo, nodes, backup=backup, topic=topic)
314 314 # transaction postclose callbacks are called in alphabet order.
315 315 # use '\xff' as prefix so we are likely to be called last.
316 316 callback = tr.getpostclose('\xffstrip')
317 317 if callback is None:
318 callback = stripcallback(ui, repo, True, topic)
318 callback = stripcallback(ui, repo, backup=backup, topic=topic)
319 319 tr.addpostclose('\xffstrip', callback)
320 320 if topic:
321 321 callback.topic = topic
322 322 callback.addnodes(nodelist)
323 323
324 324 def stripmanifest(repo, striprev, tr, files):
325 325 revlog = repo.manifestlog._revlog
326 326 revlog.strip(striprev, tr)
327 327 striptrees(repo, tr, striprev, files)
328 328
329 329 def striptrees(repo, tr, striprev, files):
330 330 if 'treemanifest' in repo.requirements: # safe but unnecessary
331 331 # otherwise
332 332 for unencoded, encoded, size in repo.store.datafiles():
333 333 if (unencoded.startswith('meta/') and
334 334 unencoded.endswith('00manifest.i')):
335 335 dir = unencoded[5:-12]
336 336 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
337 337
338 338 def rebuildfncache(ui, repo):
339 339 """Rebuilds the fncache file from repo history.
340 340
341 341 Missing entries will be added. Extra entries will be removed.
342 342 """
343 343 repo = repo.unfiltered()
344 344
345 345 if 'fncache' not in repo.requirements:
346 346 ui.warn(_('(not rebuilding fncache because repository does not '
347 347 'support fncache)\n'))
348 348 return
349 349
350 350 with repo.lock():
351 351 fnc = repo.store.fncache
352 352 # Trigger load of fncache.
353 353 if 'irrelevant' in fnc:
354 354 pass
355 355
356 356 oldentries = set(fnc.entries)
357 357 newentries = set()
358 358 seenfiles = set()
359 359
360 360 progress = ui.makeprogress(_('rebuilding'), unit=_('changesets'),
361 361 total=len(repo))
362 362 for rev in repo:
363 363 progress.update(rev)
364 364
365 365 ctx = repo[rev]
366 366 for f in ctx.files():
367 367 # This is to minimize I/O.
368 368 if f in seenfiles:
369 369 continue
370 370 seenfiles.add(f)
371 371
372 372 i = 'data/%s.i' % f
373 373 d = 'data/%s.d' % f
374 374
375 375 if repo.store._exists(i):
376 376 newentries.add(i)
377 377 if repo.store._exists(d):
378 378 newentries.add(d)
379 379
380 380 progress.complete()
381 381
382 382 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
383 383 for dir in util.dirs(seenfiles):
384 384 i = 'meta/%s/00manifest.i' % dir
385 385 d = 'meta/%s/00manifest.d' % dir
386 386
387 387 if repo.store._exists(i):
388 388 newentries.add(i)
389 389 if repo.store._exists(d):
390 390 newentries.add(d)
391 391
392 392 addcount = len(newentries - oldentries)
393 393 removecount = len(oldentries - newentries)
394 394 for p in sorted(oldentries - newentries):
395 395 ui.write(_('removing %s\n') % p)
396 396 for p in sorted(newentries - oldentries):
397 397 ui.write(_('adding %s\n') % p)
398 398
399 399 if addcount or removecount:
400 400 ui.write(_('%d items added, %d removed from fncache\n') %
401 401 (addcount, removecount))
402 402 fnc.entries = newentries
403 403 fnc._dirty = True
404 404
405 405 with repo.transaction('fncache') as tr:
406 406 fnc.write(tr)
407 407 else:
408 408 ui.write(_('fncache already up to date\n'))
409 409
410 410 def deleteobsmarkers(obsstore, indices):
411 411 """Delete some obsmarkers from obsstore and return how many were deleted
412 412
413 413 'indices' is a list of ints which are the indices
414 414 of the markers to be deleted.
415 415
416 416 Every invocation of this function completely rewrites the obsstore file,
417 417 skipping the markers we want to be removed. The new temporary file is
418 418 created, remaining markers are written there and on .close() this file
419 419 gets atomically renamed to obsstore, thus guaranteeing consistency."""
420 420 if not indices:
421 421 # we don't want to rewrite the obsstore with the same content
422 422 return
423 423
424 424 left = []
425 425 current = obsstore._all
426 426 n = 0
427 427 for i, m in enumerate(current):
428 428 if i in indices:
429 429 n += 1
430 430 continue
431 431 left.append(m)
432 432
433 433 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
434 434 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
435 435 newobsstorefile.write(bytes)
436 436 newobsstorefile.close()
437 437 return n
@@ -1,1700 +1,1701 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 revsetlang,
39 39 similar,
40 40 url,
41 41 util,
42 42 vfs,
43 43 )
44 44
45 45 from .utils import (
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 if pycompat.iswindows:
51 51 from . import scmwindows as scmplatform
52 52 else:
53 53 from . import scmposix as scmplatform
54 54
55 55 termsize = scmplatform.termsize
56 56
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files present in the dirstate but missing from the working copy
        (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                 r'unknown=%s, ignored=%s, clean=%s>') %
                tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
110 110
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to a context, preferring ctx1. Subpaths known only
    # to ctx2 matter when .hgsub was modified in ctx2 but is not yet
    # committed in ctx1.
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths present only in ctx2 are handled separately below
    missing = set(s for s in ctx2.substate if s not in ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That
    # way, status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
135 135
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # count excluded nodes that are secret and still alive; they explain
    # why "no changes" may be surprising to the user
    secretlist = []
    for n in (excluded or []):
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
152 152
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # print the traceback (if --traceback was given) before the
            # outer handlers turn the exception into a message
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # the payload may be unicode, bytes, or something else entirely;
        # normalize before printing
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.error(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        # not an "abort": the user is expected to resolve and continue
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except IOError as inst:
        # distinguish HTTP-style errors (have .code), urllib errors
        # (have .reason), EPIPE, and plain OS-level errors
        if util.safehasattr(inst, "code"):
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.error(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.error(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    # any handled exception that did not return earlier means failure
    return -1
269 269
def checknewlabel(repo, lbl, kind):
    """Abort if 'lbl' is not usable as a label (bookmark/branch/tag) name."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # a purely numeric name would be ambiguous with a revision number
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286 286
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                              % pycompat.bytestr(f))
292 292
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
304 304
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    # on Windows non-portable names always abort
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    valid = warn or abort or lval == 'ignore' or bval is not None
    if not valid:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
317 317
class casecollisionauditor(object):
    """Report filenames that differ only in case from tracked files.

    Calling the auditor with a filename warns (or aborts, depending on
    the 'abort' flag) on a possible case-folding collision."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        # lowercased form of every tracked filename, for collision tests
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        # Filenames already vetted by this auditor; calling this object
        # twice with the same filename must not report a self-collision.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
341 341
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
365 365
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal; errors deeper in
        # the walk are silently skipped by os.walk
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return True only the first time this
            # directory (by stat identity) is seen -- breaks symlink cycles
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so never follow symlinks
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                # topdown walk: emptying 'dirs' in place prunes the descent
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the symlink target in a separate recursion,
                        # sharing seen_dirs so cycles are still detected
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
409 409
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # the working directory context has no real node; use the magic wdir id
    return wdirid if node is None else node
416 416
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is not None:
        return rev
    # working directory context: use the magic wdir revision number
    return wdirrev
424 424
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
430 430
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hash in debug mode, short hash otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
438 438
def resolvehexnodeidprefix(repo, prefix):
    # Uses unfiltered repo because it's faster when prefix is ambiguous.
    # This matches the shortesthexnodeidprefix() function below.
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return None
    # raises if the node is hidden by the current filter
    repo.changelog.rev(node)
    return node
447 447
def shortesthexnodeidprefix(repo, node, minlength=1):
    """Find the shortest unambiguous prefix that matches hexnode."""
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = repo.unfiltered().changelog

    def isrev(prefix):
        # True if 'prefix' would be parsed as a revision number rather
        # than a hash prefix (so it must not be used as a short hash)
        try:
            i = int(prefix)
            # if we are a pure int, then starting with zero will not be
            # confused as a rev; or, obviously, if the int is larger
            # than the value of the tip rev
            if prefix[0:1] == b'0' or i > len(cl):
                return False
            return True
        except ValueError:
            return False

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        # lengthen the prefix until it no longer looks like a revnum;
        # worst case is the full 40-char hash, which never parses as one
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not isrev(prefix):
                return prefix

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
479 479
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.LookupError if the symbol is an
    ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
491 491
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # lookup order: special names, revision number, full hex hash,
        # names (bookmarks/tags/branches), then hex nodeid prefix
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            # reject strings like '010' or '1 ' whose int form does not
            # round-trip; they are not revision numbers
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # translate filtered-lookup failures into a user-friendly error
        raise _filterederror(repo, symbol)
552 552
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        # generic message for non-visibility filters (e.g. 'served')
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Check if the changeset is obsolete
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)

    # If the changeset is obsolete, enrich the message with the reason
    # that made this changeset not visible
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
577 577
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec to a context; fall back to 'default'."""
    # 0 is a valid revspec even though it is falsy
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if matched:
        return repo[matched.last()]
    raise error.Abort(_('empty revision set'))
586 586
def _pairspec(revspec):
    """Return True if 'revspec' is a top-level range expression."""
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
590 590
def revpair(repo, revs):
    # no revs given: compare working parent against working directory
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # pick endpoints according to the smartset's ordering; min/max are
    # cheaper than first/last when an ordering is known
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # e.g. "hg diff -r <empty> -r rev" -- one side resolved to nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
620 620
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # wrap bare revision numbers in rev(); pass other specs through as-is
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
648 648
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # merge: both parents always matter
        return parents
    if repo.ui.debugflag:
        # debug output always shows both slots, padding with null
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        # linear history: the parent is implied, omit it
        return []
    return parents
664 664
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind ("glob:", "re:", ...): leave untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match: keep the literal pattern
            ret.append(kindpat)
    return ret
683 683
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: 'm' is the matcher defined below; Python's late binding
        # makes this forward reference work once ctx.match() has returned
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # matcher matches everything; report no explicit patterns
        pats = []
    return m, pats
708 708
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
713 713
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
717 717
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
721 721
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # plain path: just canonicalize it
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # pattern: it must resolve to exactly one file in the revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
735 735
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path:
        # if some ancestor directory of the backup path exists as a file
        # (or symlink), remove it so makedirs() below can succeed.
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # symmetric case: the backup path itself exists as a directory
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
771 771
772 772 class _containsnode(object):
773 773 """proxy __contains__(node) to container.__contains__ which accepts revs"""
774 774
775 775 def __init__(self, repo, revcontainer):
776 776 self._torev = repo.changelog.rev
777 777 self._revcontains = revcontainer.__contains__
778 778
779 779 def __contains__(self, node):
780 780 return self._revcontains(self._torev(node))
781 781
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.

    When obsmarkers are not enabled, old nodes are stripped instead; 'backup'
    is forwarded to repair.delayedstrip and controls whether a strip-backup
    bundle is written for the stripped nodes.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnode, newnodes in replacements.items():
            for newnode in newnodes:
                precursors.setdefault(newnode, []).append(oldnode)

        # process new nodes parents-first so each node's parents already
        # have their final phase when it is examined
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
909 910
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and remove missing files matching ``matcher``.

    Recurses into subrepos when ``opts['subrepos']`` is set or when the
    match explicitly targets a subrepo path.  When a similarity threshold
    is given, also records renames between removed and added files.

    Returns 1 if any explicitly named file could not be processed (or a
    subrepo addremove failed), 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    # 'similarity' is a percentage (0-100); normalize it to a 0.0-1.0 ratio
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                # subrepo is referenced but not present: report and continue
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only surface the error for files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    # detect renames between the disappeared and the newly seen files
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            # an explicitly named file was rejected by the matcher
            return 1
    return ret
969 970
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Returns 1 if any explicitly listed file was rejected by the matcher,
    0 otherwise.
    '''
    # the badfn lambda closes over 'rejected', which is assigned on the
    # next line; the closure is only invoked later, once the list exists
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # detect renames between the disappeared and the newly seen files
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
998 999
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a tuple of lists: (added, unknown, deleted, removed, forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    # auditor rejects paths that escape the repository root
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        # dirstate codes used below: '?' untracked, 'r' removed, 'a' added;
        # 'st' is the stat result, falsy when the file is absent from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1027 1028
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name for each pair whose
    similarity score reaches the threshold (a 0.0-1.0 ratio).  A zero
    threshold disables rename detection entirely.
    '''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            # stay quiet about files the user named explicitly unless verbose
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
1042 1043
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-directory lock
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
1052 1053
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            # the copy cancels out: make dst a normal (uncopied) file again
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added in the working copy, so there is no
            # committed revision to record copy metadata against
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1071 1072
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings read from the file.  Raises
    RequirementError when the file is malformed or lists a feature this
    Mercurial does not support.
    '''
    requirements = set(opener.read("requires").splitlines())
    unknown = []
    for requirement in requirements:
        if requirement in supported:
            continue
        # a blank entry or one not starting with an alphanumeric character
        # means the file itself is bogus
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unknown.append(requirement)
    if unknown:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(unknown)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1090 1091
def writerequires(opener, requirements):
    """Write the given requirements, sorted one per line, to .hg/requires."""
    with opener('requires', 'w') as fp:
        fp.writelines("%s\n" % entry for entry in sorted(requirements))
1095 1096
class filecachesubentry(object):
    """Tracks the stat state of a single file to detect modifications.

    ``cachestat`` holds the last stat result (None until a successful
    stat); ``_cacheable`` is None while cacheability is still unknown.
    """

    def __init__(self, path, stat):
        # path: file to watch; stat: whether to stat it immediately
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat the file, but only when caching its stat makes sense
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed since the last recorded stat."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return a util.cachestat for path, or None if the file is absent."""
        try:
            return util.cachestat(path)
        except OSError as e:
            # only a missing file is tolerated; other errors propagate
            if e.errno != errno.ENOENT:
                raise
1150 1151
class filecacheentry(object):
    """Aggregate of filecachesubentries, one per tracked path."""

    def __init__(self, paths, stat=True):
        # build one sub-entry per path; 'stat' controls immediate stat()ing
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(subentry.changed() for subentry in self._entries)

    def refresh(self):
        for subentry in self._entries:
            subentry.refresh()
1167 1168
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is returned.

    On external property set operations, stat() calls are performed and the new
    value is cached.

    On property delete operations, cached data is removed.

    When using the property API, cached data is always returned, if available:
    no stat() is performed to check if the file has changed and if the function
    needs to be called to reflect file changes.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped function plus its name
        # both as native str (sname) and as bytes (name)
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.sname in obj.__dict__:
            # fast path: value already cached on the instance
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.sname]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # tracked file changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # mirror normal attribute-deletion semantics for the cached value
        try:
            del obj.__dict__[self.sname]
        except KeyError:
            raise AttributeError(self.sname)
1258 1259
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec>[ <value>]"
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child process and close the stream
        if proc:
            proc.communicate()
        if src:
            src.close()
    # surface a non-zero exit status of the shell command as an abort
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1313 1314
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run ``cmd`` as a subprocess that may inherit ``lock``.

    The token produced by ``lock.inherit()`` is exported to the child
    through the ``envvar`` environment variable.  Returns the exit code
    of the subprocess (via ui.system).
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        # 'locker' identifies the lock holder for the child process
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1323 1324
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # delegate to _locksub with the currently held working-directory lock
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
1332 1333
class progress(object):
    """Context-manager helper that forwards progress updates to ui.progress.

    The bar is torn down via complete() when the context exits, whether or
    not an exception occurred.
    """

    def __init__(self, ui, topic, unit="", total=None):
        self.ui = ui
        self.topic = topic
        self.unit = unit
        self.total = total
        self.pos = 0

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # always clear the progress bar, even on error
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        self.pos = pos
        if total:
            self.total = total
        self._print(item)

    def increment(self, step=1, item="", total=None):
        # advance the current position by 'step' and redraw
        self.update(self.pos + step, item, total)

    def complete(self):
        # a position of None tells the ui to tear the bar down
        self.ui.progress(self.topic, None)

    def _print(self, item):
        self.ui.progress(self.topic, self.pos, item, self.unit, self.total)
1363 1364
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    # any of these format knobs implies a generaldelta repository
    relevant = ('generaldelta', 'usegeneraldelta', 'sparse-revlog')
    return any(ui.configbool('format', name) for name in relevant)
1371 1372
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1377 1378
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    # reserved key used by read()/write() for the free-form first line
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted but never used here -- confirm
        # whether any caller relies on it before removing
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes the dict() call blow up
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        # validate each pair before anything is written so the file is
        # either fully written or untouched
        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1446 1447
# transaction names (matched by prefix in registersummarycallback) after
# which obsoleted changesets should be reported to the user
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction names (matched by prefix) after which newly added
# changesets should be reported
_reportnewcssource = [
    'pull',
    'unbundle',
]
1459 1460
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1472 1473
# a list of (repo, revs, match) prefetch functions, invoked by
# prefetchfiles() above
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1478 1479
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on ``txnname``, this registers post-close callbacks on ``otr``
    reporting obsoleted changesets, new instabilities, newly added changesets
    and phase changes.
    """
    def txmatch(sources):
        # does the transaction name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    # category names double as ordering keys ('00-', '01-', ...) so the
    # reports run in registration order
    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # count and report changesets obsoleted by this transaction
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # number of visible changesets per instability type
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot the counts now so the callback can report the delta
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            newrevs = tr.changes.get('revs', pycompat.xrange(0, 0))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            # only count pre-existing changesets that became public
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev not in newrevs
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1580 1581
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        # nothing new to warn about
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1588 1589
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line textual summary of ``nodes``.

    All short hashes are shown when the list is small enough or the ui is
    verbose; otherwise the first ``maxnumnodes`` hashes are shown followed
    by a count of the remainder.
    """
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1594 1595
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1609 1610
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # the default implementation is the identity; extensions wrap this
    return sink
1615 1616
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # direct access must be enabled explicitly, and only applies to
    # repositories that actually have a filter active
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # collect every hash-looking symbol from the user-provided revsets
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1658 1659
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try to interpret the symbol as a plain revision number
        try:
            n = int(s)
            if n <= tiprev:
                # NOTE(review): tiprev is len(unficl), i.e. one past the
                # largest valid rev, so 'n <= tiprev' also lets n == tiprev
                # through -- confirm this boundary is intended
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        # numeric rev exists but is filtered out: pin it
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # otherwise treat the symbol as a (possibly abbreviated) node hash
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                # hash resolves to a hidden changeset: pin it
                revs.add(rev)

    return revs
1692 1693
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    # ancestors of the bookmark, minus ancestors of other heads and of
    # other bookmarks
    pattern = ("ancestors(bookmark(%s)) - "
               "ancestors(head() and not bookmark(%s)) - "
               "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(pattern, mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now