##// END OF EJS Templates
cleanupnodes: rename "mapping" to "replacements"...
Martin von Zweigbergk -
r34363:2dbd6d25 stable
parent child Browse files
Show More
@@ -1,1110 +1,1110 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 wdirid,
23 23 wdirrev,
24 24 )
25 25
26 26 from . import (
27 27 encoding,
28 28 error,
29 29 match as matchmod,
30 30 obsolete,
31 31 obsutil,
32 32 pathutil,
33 33 phases,
34 34 pycompat,
35 35 revsetlang,
36 36 similar,
37 37 util,
38 38 )
39 39
40 40 if pycompat.osname == 'nt':
41 41 from . import scmwindows as scmplatform
42 42 else:
43 43 from . import scmposix as scmplatform
44 44
45 45 termsize = scmplatform.termsize
46 46
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # pack the seven file lists into the underlying tuple, in the same
        # order the properties below read them back
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
99 99
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = {}
    for subpath in ctx2.substate:
        subpaths[subpath] = ctx2
    for subpath in ctx1.substate:
        subpaths[subpath] = ctx1

    # subpaths present only in ctx2 are handled separately below
    missing = set()
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
124 124
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # count excluded nodes that are live secret changesets; those explain
    # why nothing was exchanged
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141 141
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # print the traceback first (a no-op unless traceback output is
            # enabled on ui), then fall through to the handlers below
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        # args[1] may be any object sent by the remote; render it safely
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # user action required is not a hard failure: exit code 1, not -1
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        # last word of the message is the module that failed to import
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        # NOTE(review): a "code" attribute presumably identifies an
        # HTTP-style error object — confirm against callers
        if util.safehasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % inst)
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe: exit silently
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
            else:
                ui.warn(_("abort: %s\n") % inst.strerror)
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
        else:
            ui.warn(_("abort: %s\n") % inst.strerror)
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    # all handled (non-InterventionRequired) errors map to exit code -1
    return -1
249 249
def checknewlabel(repo, lbl, kind):
    '''abort if lbl is not acceptable as a new label name'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
263 263
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newline characters would corrupt manifest and dirstate parsing
    for c in ('\r', '\n'):
        if c in f:
            raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
268 268
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        # portability checking disabled entirely
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
280 280
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans.'''
    value = ui.config('ui', 'portablefilenames')
    lowered = value.lower()
    boolval = util.parsebool(value)
    # always abort on Windows; elsewhere only when explicitly configured
    abort = pycompat.osname == 'nt' or lowered == 'abort'
    warn = boolval or lowered == 'warn'
    if boolval is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
293 293
class casecollisionauditor(object):
    '''warn or abort when a new file's lower-cased name collides with the
    lower-cased name of an already-tracked file'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # pre-compute lower-cased names of every tracked file in one pass
        tracked = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(tracked).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
317 317
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view up to maxrev and
    returns that SHA-1 digest, or None when nothing is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
341 341
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate walk errors that concern the top-level path itself
        if err.filename == path:
            raise err
    # samestat is used to recognize directories already visited (so that
    # following symlinks cannot loop); it may be absent on some platforms,
    # in which case symlink following is disabled below
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True when it was not
            # already present (i.e. this directory is new to the walk)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            # a patch queue repository may live at .hg/patches/.hg
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target explicitly, sharing seen_dirs
                        # so cycles are detected across the recursion
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
389 389
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is not None:
        return node
    # the working directory context has no real node; use the wdir id
    return wdirid
396 396
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is not None:
        return rev
    # working directory context: substitute the wdir pseudo revision
    return wdirrev
404 404
def revsingle(repo, revspec, default='.'):
    '''resolve revspec to a single changectx, the last of the resulting set;
    an empty spec (other than the integer 0) falls back to default'''
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec])
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
413 413
def _pairspec(revspec):
    # True when the top-level revset operator is a range form, i.e. the
    # user explicitly wrote a pair-producing expression
    tree = revsetlang.parse(revspec)
    rangeops = ('range', 'rangepre', 'rangepost', 'rangeall')
    return tree and tree[0] in rangeops
417 417
def revpair(repo, revs):
    '''resolve a list of revset specs to a (first, second) pair of nodes;
    second is None when the input denotes a single revision'''
    if not revs:
        # default: working directory's first parent, no second revision
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints of the smartset, respecting its ordering
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # multiple specs collapsing to one revision is only valid when every
    # individual spec resolved to something
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
447 447
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # normalize bare revision numbers into 'rev(N)' revset expressions
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
475 475
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    # a real merge: both parents matter
    if len(ps) > 1:
        return ps
    # debug output always shows both slots, padding with the null rev
    if repo.ui.debugflag:
        return [ps[0], repo['null']]
    # linear history: omit the parent when it is simply the preceding rev
    if ps[0].rev() >= intrev(ctx) - 1:
        return []
    return ps
491 491
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume the shell has already done it.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindedpat in pats:
        kind, pat = matchmod._patsplit(kindedpat, None)
        if kind is not None:
            # pattern carries an explicit kind prefix; leave it untouched
            expanded.append(kindedpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # glob matched nothing: keep the literal pattern
            expanded.append(kindedpat)
    return expanded
510 510
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # 'm' is bound below; by the time the matcher invokes this callback
        # the matcher object already exists
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means the patterns were effectively empty
        pats = []
    return m, pats
535 535
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats() but discards the normalized pattern list
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default,
                                  badfn=badfn)
    return matcher
540 540
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
544 544
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
548 548
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backuppath = ui.config('ui', 'origbackuppath')
    if backuppath is None:
        # no custom location configured: keep the backup next to the file
        return filepath + ".orig"

    relpath = os.path.relpath(filepath, start=repo.root)
    fullpath = repo.wjoin(backuppath, relpath)

    # create the backup directory on demand
    backupdir = repo.vfs.dirname(fullpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullpath + ".orig"
568 568
569 569 class _containsnode(object):
570 570 """proxy __contains__(node) to container.__contains__ which accepts revs"""
571 571
572 572 def __init__(self, repo, revcontainer):
573 573 self._torev = repo.changelog.rev
574 574 self._revcontains = revcontainer.__contains__
575 575
576 576 def __contains__(self, node):
577 577 return self._revcontains(self._torev(node))
578 578
def cleanupnodes(repo, replacements, operation):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".
    """
    # NOTE(review): this span contained interleaved old/new diff lines from
    # the "mapping" -> "replacements" rename; resolved to the renamed version.
    # Accept a bare iterable of nodes and normalize it to the dict form.
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            obsolete.createmarkers(repo, rels, operation=operation)
        else:
            from . import repair # avoid import cycle
            repair.delayedstrip(repo.ui, repo, list(replacements), operation)
653 653
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''schedule adding of untracked files and removal of missing files, with
    rename detection when similarity > 0, recursing into subrepos

    Returns 1 when a subrepo addremove failed or an explicitly-requested
    file was rejected by the matcher; 0 otherwise.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # recurse into relevant subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # collect files the matcher reports as bad, so they can flip the return
    # code at the end when they were explicitly requested
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added/removed (always for non-exact matches,
    # and for everything in verbose mode)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
709 709
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # the lambda closes over the *name* 'rejected', so defining the list on
    # the next line still works (late binding is intentional here)
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # detect renames between the removed and added sets when similarity > 0
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # return 1 when one of the requested files was rejected by the matcher
    for f in rejected:
        if f in m.files():
            return 1
    return 0
738 738
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # st is the walk's stat result for the file; falsy when the file is
    # absent from the working copy
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # not tracked, and passes the path audit: candidate for adding
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked (not removed) but missing on disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
767 767
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity is not positive.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
782 782
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # perform all dirstate updates under one working-copy lock
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
792 792
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record on src back to its origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # NOTE(review): normallookup appears to drop the copy record by
        # putting dst back into normal-lookup state — confirm
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added, not committed: warn that no copy
            # data can be recorded, and just add dst
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
811 811
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings, or raises RequirementError.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty entry or one not starting alphanumerically means the
        # file itself is damaged, not merely from a newer Mercurial
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
830 830
def writerequires(opener, requirements):
    '''write the requirement strings, sorted one per line, to .hg/requires'''
    with opener('requires', 'w') as fp:
        fp.writelines("%s\n" % r for r in sorted(requirements))
835 835
class filecachesubentry(object):
    '''stat-based change tracking for a single path, used by filecache'''
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        # 'stat' flag: stat the file immediately instead of lazily
        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat to capture the file's current state as the baseline
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        # a differing stat means the file changed; adopt the new stat as
        # the baseline for subsequent checks
        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns util.cachestat(path), or None when the file does not exist
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
890 890
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per watched path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits on the first changed entry, like the
        # explicit loop it replaces
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
907 907
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths of the files whose changes invalidate the cache;
        # resolved to real paths per-instance via join()
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and the (bytes)
        # name the computed value will be cached under on instances
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # fast path: a value stored in obj.__dict__ means the stat check
        # already happened when it was cached there
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # a watched file was replaced: recompute the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # mirror the value into __dict__ so the fast path above is hit on
        # subsequent reads until the cache is dropped (see __delete__)
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop only the instance-level mirror; _filecache keeps its entry so
        # the next __get__ re-checks the stat info
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
986 986
987 987 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
988 988 if lock is None:
989 989 raise error.LockInheritanceContractViolation(
990 990 'lock can only be inherited while held')
991 991 if environ is None:
992 992 environ = {}
993 993 with lock.inherit() as locker:
994 994 environ[envvar] = locker
995 995 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
996 996
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1005 1005
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1012 1012
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1018 1018
1019 1019 class simplekeyvaluefile(object):
1020 1020 """A simple file with key=value lines
1021 1021
1022 1022 Keys must be alphanumerics and start with a letter, values must not
1023 1023 contain '\n' characters"""
1024 1024 firstlinekey = '__firstline'
1025 1025
1026 1026 def __init__(self, vfs, path, keys=None):
1027 1027 self.vfs = vfs
1028 1028 self.path = path
1029 1029
1030 1030 def read(self, firstlinenonkeyval=False):
1031 1031 """Read the contents of a simple key-value file
1032 1032
1033 1033 'firstlinenonkeyval' indicates whether the first line of file should
1034 1034 be treated as a key-value pair or reuturned fully under the
1035 1035 __firstline key."""
1036 1036 lines = self.vfs.readlines(self.path)
1037 1037 d = {}
1038 1038 if firstlinenonkeyval:
1039 1039 if not lines:
1040 1040 e = _("empty simplekeyvalue file")
1041 1041 raise error.CorruptedState(e)
1042 1042 # we don't want to include '\n' in the __firstline
1043 1043 d[self.firstlinekey] = lines[0][:-1]
1044 1044 del lines[0]
1045 1045
1046 1046 try:
1047 1047 # the 'if line.strip()' part prevents us from failing on empty
1048 1048 # lines which only contain '\n' therefore are not skipped
1049 1049 # by 'if line'
1050 1050 updatedict = dict(line[:-1].split('=', 1) for line in lines
1051 1051 if line.strip())
1052 1052 if self.firstlinekey in updatedict:
1053 1053 e = _("%r can't be used as a key")
1054 1054 raise error.CorruptedState(e % self.firstlinekey)
1055 1055 d.update(updatedict)
1056 1056 except ValueError as e:
1057 1057 raise error.CorruptedState(str(e))
1058 1058 return d
1059 1059
1060 1060 def write(self, data, firstline=None):
1061 1061 """Write key=>value mapping to a file
1062 1062 data is a dict. Keys must be alphanumerical and start with a letter.
1063 1063 Values must not contain newline characters.
1064 1064
1065 1065 If 'firstline' is not None, it is written to file before
1066 1066 everything else, as it is, not in a key=value form"""
1067 1067 lines = []
1068 1068 if firstline is not None:
1069 1069 lines.append('%s\n' % firstline)
1070 1070
1071 1071 for k, v in data.items():
1072 1072 if k == self.firstlinekey:
1073 1073 e = "key name '%s' is reserved" % self.firstlinekey
1074 1074 raise error.ProgrammingError(e)
1075 1075 if not k[0].isalpha():
1076 1076 e = "keys must start with a letter in a key-value file"
1077 1077 raise error.ProgrammingError(e)
1078 1078 if not k.isalnum():
1079 1079 e = "invalid key name in a simple key-value file"
1080 1080 raise error.ProgrammingError(e)
1081 1081 if '\n' in v:
1082 1082 e = "invalid value in a simple key-value file"
1083 1083 raise error.ProgrammingError(e)
1084 1084 lines.append("%s=%s\n" % (k, v))
1085 1085 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1086 1086 fp.write(''.join(lines))
1087 1087
# transaction name prefixes for which a summary of obsoleted changesets
# should be reported when the transaction closes (see
# registersummarycallback below)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]
1095 1095
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    The callback is only installed for transaction names starting with one
    of the prefixes in _reportobsoletedsource; at most one callback is
    registered per transaction.
    """
    if not any(txnname.startswith(source)
               for source in _reportobsoletedsource):
        return
    # hold the repo via a weakref so the callback does not keep it alive
    reporef = weakref.ref(repo)
    def reportsummary(tr):
        """the actual callback reporting the summary"""
        repo = reporef()
        obsoleted = obsutil.getobsoleted(repo, tr)
        if obsoleted:
            repo.ui.status(_('obsoleted %i changesets\n')
                           % len(obsoleted))
    otr.addpostclose('00-txnreport', reportsummary)
General Comments 0
You need to be logged in to leave comments. Login now