##// END OF EJS Templates
extdata: use subprocess so we don't have to chdir() manually
Yuya Nishihara -
r34462:c67db5dc default
parent child Browse files
Show More
@@ -1,1194 +1,1196 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 import subprocess
16 17 import weakref
17 18
18 19 from .i18n import _
19 20 from .node import (
20 21 hex,
21 22 nullid,
22 23 short,
23 24 wdirid,
24 25 wdirrev,
25 26 )
26 27
27 28 from . import (
28 29 encoding,
29 30 error,
30 31 match as matchmod,
31 32 obsolete,
32 33 obsutil,
33 34 pathutil,
34 35 phases,
35 36 pycompat,
36 37 revsetlang,
37 38 similar,
38 39 url,
39 40 util,
40 41 )
41 42
42 43 if pycompat.osname == 'nt':
43 44 from . import scmwindows as scmplatform
44 45 else:
45 46 from . import scmposix as scmplatform
46 47
47 48 termsize = scmplatform.termsize
48 49
class status(tuple):
    """Immutable 7-tuple of file lists, one list per file status.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant
    to the working copy.
    """

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        """files that have been modified"""
        return self[0]

    @property
    def added(self):
        """files that have been added"""
        return self[1]

    @property
    def removed(self):
        """files that have been removed"""
        return self[2]

    @property
    def deleted(self):
        """files tracked in the dirstate but missing from the working copy
        (aka "missing")
        """
        return self[3]

    @property
    def unknown(self):
        """files not in the dirstate that are not ignored"""
        return self[4]

    @property
    def ignored(self):
        """files not in the dirstate that are ignored (by _dirignore())"""
        return self[5]

    @property
    def clean(self):
        """files that have not been modified"""
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
101 102
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs for every subrepo path present in
    either context's substate, preferring the subrepo from ctx1 when a
    path exists in both.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths present only in ctx2; they cannot be resolved against ctx1
    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
126 127
def nochangesfound(ui, repo, excluded=None):
    """Report that a push/pull found no changes.

    excluded is None or a list of nodes excluded from the push/pull;
    excluded changesets that are secret and not extinct are counted and
    mentioned in the message.
    """
    secret = []
    for n in (excluded or ()):
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secret.append(n)

    if secret:
        msg = _("no changes found (ignored %d secret changesets)\n")
        ui.status(msg % len(secret))
    else:
        ui.status(_("no changes found\n"))
143 144
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Known exceptions are turned into user-facing "abort: ..." messages on
    ui and a -1 exit code, except where noted below (InterventionRequired
    returns 1, SystemExit propagates its own code).
    """
    try:
        try:
            return func()
        except: # re-raises
            # log the traceback (if tracebacks are enabled) before the
            # outer handlers turn the exception into a message
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename,
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        # not an error: the user must do something (e.g. resolve conflicts)
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # HTTPError
            ui.warn(_("abort: %s\n") % inst)
        elif util.safehasattr(inst, "reason"):
            # URLError or SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe: the reader went away, nothing useful to report
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror), inst.filename))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror), inst.filename))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    return -1
254 255
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is not acceptable as a new label name.

    Rejects reserved names, names with control/separator characters, and
    names that parse as integers.

    NOTE: the "kind" parameter is deliberately not used in the ui output,
    because that would make the strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
268 269
def checkfilename(f):
    """Abort if filename ``f`` is not acceptable for a tracked file."""
    if any(banned in f for banned in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
273 274
def checkportable(ui, f):
    """Validate filename ``f`` for portability, warning or aborting
    according to the ui.portablefilenames configuration."""
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, util.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
285 286
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans derived from the
    ui.portablefilenames config value, which may be 'warn', 'abort',
    'ignore' or a boolean spelling.
    '''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = util.parsebool(val)
    # on Windows ('nt') non-portable names are always treated as fatal
    abort = pycompat.osname == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    # bval is None when val is not a recognized boolean spelling; in that
    # case the value must be one of the known keywords
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
298 299
class casecollisionauditor(object):
    """Warn or abort when a new filename would collide case-insensitively
    with a tracked or previously-audited file.

    Keeps a set of lower-cased names seeded from the dirstate and updated
    with every audited file.
    """

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # when True, a detected collision raises Abort instead of warning
        self._abort = abort
        # join with NUL so all names can be lower-cased in a single call
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        """Audit filename ``f`` and remember it for later checks."""
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a name already in the dirstate is not a collision with itself
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
322 323
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view up to maxrev
    and returns that SHA-1 digest.  Returns None when the view filters
    nothing, or when no filtered rev is <= maxrev.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            # ';' delimits revs so e.g. (1, 23) and (12, 3) hash differently
            s.update('%d;' % rev)
        key = s.digest()
    return key
346 347
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs

    followsym enables following symlinked directories; seen_dirs tracks
    already-visited directories (by stat identity) to break symlink
    cycles and is filled in automatically when None.
    '''
    def errhandler(err):
        # only errors on the root path itself are fatal; deeper walk
        # errors are silently skipped
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True if it was new.
            # samestat-based dedup is what breaks symlink cycles.
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so never follow links
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the link target with the shared cycle tracker
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
394 395
def binnode(ctx):
    """Return the binary node id of basectx ``ctx``.

    The working directory context reports node None; map that to the
    magic ``wdirid`` constant so callers always get a binary id.
    """
    n = ctx.node()
    return wdirid if n is None else n
401 402
def intrev(ctx):
    """Return an integer revision for ``ctx`` usable in comparisons or
    arithmetic.

    The working directory context reports rev None; map that to the
    magic ``wdirrev`` constant.
    """
    r = ctx.rev()
    return wdirrev if r is None else r
409 410
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', the default template
    provided by cmdutil.changeset_templater."""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
415 416
def formatrevnode(ui, rev, node):
    """Return 'rev:node', with the node rendered long (debug) or short."""
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
423 424
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve ``revspec`` to a single changectx, or ``default`` when the
    spec is empty.

    Aborts when the spec resolves to an empty set.
    """
    # rev 0 is a valid spec; only a truly empty spec selects the default
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
432 433
def _pairspec(revspec):
    """Report whether ``revspec`` parses to a top-level range expression."""
    parsed = revsetlang.parse(revspec)
    if not parsed:
        return parsed
    return parsed[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
436 437
def revpair(repo, revs):
    """Resolve a list of revset specs to a (first, second) pair of nodes.

    Returns (p1-of-working-dir, None) when revs is empty.  second is None
    when the result is a single revision, unless the single spec was
    itself written as a range expression, in which case both ends are
    returned even if equal.
    """
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        # unordered set: fall back to insertion order
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
466 467
def revrange(repo, specs, localalias=None):
    """Execute one or more revsets and return the union of their results.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases: the revsets in
    ``specs`` are combined via a chained ``OR`` expression, and an empty
    ``specs`` yields an empty result.

    Integers in ``specs`` are treated as revision numbers.

    The revsets are assumed to be formatted already; expand arguments with
    ``revsetlang.formatspec()`` before passing them in.

    A single revset may be specified.

    Returns a ``revset.abstractsmartset``, a list-like interface over
    integer revisions.
    """
    # wrap bare integers into explicit rev() revsets
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
494 495
def meaningfulparents(repo, ctx):
    """Return the list of meaningful (or all, if debugging) parents of ctx.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent is meaningful only when it is not the
    immediately preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        # a real merge: both parents always matter
        return ps
    if repo.ui.debugflag:
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx) - 1:
        # first parent immediately precedes this rev: nothing to show
        return []
    return ps
510 511
def expandpats(pats):
    """Expand bare glob patterns (needed when running on Windows).

    On POSIX the shell has already expanded globs, so patterns are
    returned unchanged; ``util.expandglobs`` says which case applies.
    """
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded pattern: never glob-expand
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            expanded.append(kindpat)
    return expanded
529 530
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.

    pats/opts follow the usual command-line pattern conventions; globbed
    suppresses glob expansion of relpath patterns.  The returned pattern
    list is emptied when the matcher matches everything.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # 'm' is bound below; this closure only runs after it exists
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # matcher matches everything: report "no patterns" to the caller
        pats = []
    return m, pats
554 555
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.

    Thin wrapper around matchandpats() that discards the expanded pattern
    list and returns only the matcher.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
559 560
def matchall(repo):
    '''Return a matcher that will efficiently match everything.

    Delegates to matchmod.always, anchored at the repo root and the
    current working directory.'''
    return matchmod.always(repo.root, repo.getcwd())
563 564
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.

    Delegates to matchmod.exact with the repo root and cwd; no pattern
    expansion is performed on ``files``.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
567 568
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    Returns the full path the .orig file should use; as a side effect,
    missing directories under the configured backup path are created.
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if origbackuppath is None:
        return filepath + ".orig"

    # mirror the file's repo-relative path under the backup directory
    filepathfromroot = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)

    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath
587 588
588 589 class _containsnode(object):
589 590 """proxy __contains__(node) to container.__contains__ which accepts revs"""
590 591
591 592 def __init__(self, repo, revcontainer):
592 593 self._torev = repo.changelog.rev
593 594 self._revcontains = revcontainer.__contains__
594 595
595 596 def __contains__(self, node):
596 597 return self._revcontains(self._torev(node))
597 598
def cleanupnodes(repo, replacements, operation, moves=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms: a bare iterable means
    # "replaced with nothing"
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # explicit override from the caller wins
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    # all updates below happen inside a single transaction
    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
686 687
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    '''Add new files, forget missing ones, and record renames (addremove).

    Recurses into subrepos when requested by opts or when the matcher
    reaches into them.  Returns 1 if any file was rejected or any subrepo
    addremove failed, 0 otherwise.  With dry_run, the dirstate is not
    modified.
    '''
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # collect files the matcher reports as bad instead of warning directly
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
742 743
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.

    Unknown files are added, missing ones are forgotten, and renames are
    recorded when similarity detection finds them.  Returns 1 if any file
    in ``files`` was rejected by the matcher, 0 otherwise.
    '''
    # Define the list before creating the badfn closure that appends to
    # it; the previous code built the closure first and relied on
    # late-binding of the not-yet-defined name, which was fragile.
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
771 772
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of
    repo-relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        # dstate is the dirstate code ('?' untracked, 'r' removed,
        # 'a' added); st is the stat result, falsy when the file is
        # absent from disk
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
800 801
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new_name: old_name} dict; empty when similarity is 0.
    Each recorded rename is reported on ui unless both names were exact
    matches (and verbosity is off).'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
815 816
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # hold the working-directory lock for the whole batch of updates
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
825 826
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record back to the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            # drop the copy record; dst becomes a plain tracked file again
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source is only added, not committed: no copy data possible
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
844 845
def readrequires(opener, supported):
    """Read and parse .hg/requires, checking every entry against
    ``supported``.

    Returns the set of requirements on success; raises RequirementError
    when the file is corrupt or lists unsupported features.
    """
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for req in requirements:
        if req in supported:
            continue
        if not req or not req[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(req)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
863 864
def writerequires(opener, requirements):
    """Persist ``requirements`` to the 'requires' file, sorted, one per
    line."""
    with opener('requires', 'w') as fp:
        for requirement in sorted(requirements):
            fp.write("%s\n" % requirement)
868 869
class filecachesubentry(object):
    """Stat-cache state for a single path.

    Tracks the path's util.cachestat (or None when the file is absent)
    plus a tri-state "cacheable" flag (True/False/None-unknown), so
    changed()/refresh() can compare and update that state.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat now so future changed() calls compare against this point
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if the path is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
923 924
class filecacheentry(object):
    """A bundle of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        """Refresh the recorded stat info of every subentry."""
        for entry in self._entries:
            entry.refresh()
940 941
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (under .hg) that invalidate the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        """Decorator entry point: remember the wrapped function and its name."""
        self.func = func
        # store the cache key as bytes (func.__name__ is a native str)
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        """Descriptor read: return the cached value, recomputing if any
        tracked file changed since the last stat."""
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # obj.__dict__ holds the fast-path copy consulted above
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        """Descriptor write: replace the cached value without re-stat'ing."""
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        """Descriptor delete: drop the fast-path copy so the next read
        goes through the stat check again."""
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1019 1020
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root;
            # pass cwd= instead of chdir()ing so the process-wide working
            # directory is never touched
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=util.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[repo[k].rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # reap the child (if any) before closing its pipe
        if proc:
            proc.communicate()
        if src:
            src.close()

    return data
1070 1072
1071 1073 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1072 1074 if lock is None:
1073 1075 raise error.LockInheritanceContractViolation(
1074 1076 'lock can only be inherited while held')
1075 1077 if environ is None:
1076 1078 environ = {}
1077 1079 with lock.inherit() as locker:
1078 1080 environ[envvar] = locker
1079 1081 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1080 1082
def wlocksub(repo, cmd, *args, **kwargs):
    """Spawn cmd as a subprocess that may inherit repo's working-dir lock.

    The wlock must currently be held. Accepts the same extra arguments as
    ui.system and returns the exit code of the subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1089 1091
def gdinitconfig(ui):
    """Return True if new repositories should be created with generaldelta."""
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1096 1098
def gddeltaconfig(ui):
    """Return whether incoming deltas should be optimised for generaldelta."""
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1102 1104
1103 1105 class simplekeyvaluefile(object):
1104 1106 """A simple file with key=value lines
1105 1107
1106 1108 Keys must be alphanumerics and start with a letter, values must not
1107 1109 contain '\n' characters"""
1108 1110 firstlinekey = '__firstline'
1109 1111
1110 1112 def __init__(self, vfs, path, keys=None):
1111 1113 self.vfs = vfs
1112 1114 self.path = path
1113 1115
1114 1116 def read(self, firstlinenonkeyval=False):
1115 1117 """Read the contents of a simple key-value file
1116 1118
1117 1119 'firstlinenonkeyval' indicates whether the first line of file should
1118 1120 be treated as a key-value pair or reuturned fully under the
1119 1121 __firstline key."""
1120 1122 lines = self.vfs.readlines(self.path)
1121 1123 d = {}
1122 1124 if firstlinenonkeyval:
1123 1125 if not lines:
1124 1126 e = _("empty simplekeyvalue file")
1125 1127 raise error.CorruptedState(e)
1126 1128 # we don't want to include '\n' in the __firstline
1127 1129 d[self.firstlinekey] = lines[0][:-1]
1128 1130 del lines[0]
1129 1131
1130 1132 try:
1131 1133 # the 'if line.strip()' part prevents us from failing on empty
1132 1134 # lines which only contain '\n' therefore are not skipped
1133 1135 # by 'if line'
1134 1136 updatedict = dict(line[:-1].split('=', 1) for line in lines
1135 1137 if line.strip())
1136 1138 if self.firstlinekey in updatedict:
1137 1139 e = _("%r can't be used as a key")
1138 1140 raise error.CorruptedState(e % self.firstlinekey)
1139 1141 d.update(updatedict)
1140 1142 except ValueError as e:
1141 1143 raise error.CorruptedState(str(e))
1142 1144 return d
1143 1145
1144 1146 def write(self, data, firstline=None):
1145 1147 """Write key=>value mapping to a file
1146 1148 data is a dict. Keys must be alphanumerical and start with a letter.
1147 1149 Values must not contain newline characters.
1148 1150
1149 1151 If 'firstline' is not None, it is written to file before
1150 1152 everything else, as it is, not in a key=value form"""
1151 1153 lines = []
1152 1154 if firstline is not None:
1153 1155 lines.append('%s\n' % firstline)
1154 1156
1155 1157 for k, v in data.items():
1156 1158 if k == self.firstlinekey:
1157 1159 e = "key name '%s' is reserved" % self.firstlinekey
1158 1160 raise error.ProgrammingError(e)
1159 1161 if not k[0].isalpha():
1160 1162 e = "keys must start with a letter in a key-value file"
1161 1163 raise error.ProgrammingError(e)
1162 1164 if not k.isalnum():
1163 1165 e = "invalid key name in a simple key-value file"
1164 1166 raise error.ProgrammingError(e)
1165 1167 if '\n' in v:
1166 1168 e = "invalid value in a simple key-value file"
1167 1169 raise error.ProgrammingError(e)
1168 1170 lines.append("%s=%s\n" % (k, v))
1169 1171 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1170 1172 fp.write(''.join(lines))
1171 1173
# transaction-name prefixes for which closing the transaction should
# report how many changesets were obsoleted (see registersummarycallback)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]
1179 1181
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    if not any(txnname.startswith(source)
               for source in _reportobsoletedsource):
        return
    # hold the repo weakly so the registered callback does not keep it alive
    reporef = weakref.ref(repo)
    def reportsummary(tr):
        """the actual callback reporting the summary"""
        repo = reporef()
        obsoleted = obsutil.getobsoleted(repo, tr)
        if obsoleted:
            repo.ui.status(_('obsoleted %i changesets\n')
                           % len(obsoleted))
    otr.addpostclose('00-txnreport', reportsummary)
General Comments 0
You need to be logged in to leave comments. Login now