##// END OF EJS Templates
revsymbol: stop delegating to repo.__getitem__ for unhandled symbols (API)...
Martin von Zweigbergk -
r37549:6639ac97 default
parent child Browse files
Show More
@@ -1,1543 +1,1543 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28
29 29 from . import (
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 revsetlang,
39 39 similar,
40 40 url,
41 41 util,
42 42 vfs,
43 43 )
44 44
45 45 from .utils import (
46 46 procutil,
47 47 stringutil,
48 48 )
49 49
50 50 if pycompat.iswindows:
51 51 from . import scmwindows as scmplatform
52 52 else:
53 53 from . import scmposix as scmplatform
54 54
55 55 termsize = scmplatform.termsize
56 56
class status(tuple):
    """Immutable 7-tuple of file lists, one per status category.

    The 'deleted', 'unknown' and 'ignored' properties are only meaningful
    for the working copy.
    """

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored,
                  clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files present in the dirstate but deleted from the working copy
        (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
109 109
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context it should be read from, preferring
    # ctx1.  Subpaths from ctx2 matter when .hgsub has been modified (in
    # ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths that exist only in ctx2 are handled separately below
    missing = set(ctx2.substate) - set(ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2, so
    # that 'sub.{status|diff}(rev2)' compares against an empty baseline
    # instead of the ctx2 subrepo being compared against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
134 134
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # count excluded nodes that are secret and not extinct
    secretlist = [n for n in (excluded or [])
                  if repo[n].phase() >= phases.secret
                  and not repo[n].extinct()]

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
151 151
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # log the traceback (if --traceback is set) before translating
            # the exception into a user-facing message below
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n")
                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or stringutil.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # the payload may be unicode, bytes, or anything else; normalize
        # unicode to bytes, then report the rest as a repr
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.warn(" %r\n" % (msg,))
        elif not msg:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % stringutil.ellipsis(msg))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # InterventionRequired is the only Mercurial error that exits 1
        # (user action needed) instead of the generic -1 below
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        # NOTE(review): the .code/.reason attribute probing presumably
        # distinguishes urllib2 HTTPError/URLError objects — confirm
        if util.safehasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe is expected when e.g. a pager exits early
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else: # unhandled IOError flavor: let it propagate
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1
268 268
def checknewlabel(repo, lbl, kind):
    """Abort if lbl is not a valid new label (bookmark/branch/tag) name."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # a purely numeric name would be ambiguous with revision numbers
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 285
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
290 290
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
302 302
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lowered = val.lower()
    asbool = stringutil.parsebool(val)
    # on Windows non-portable names always abort
    abort = pycompat.iswindows or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
315 315
class casecollisionauditor(object):
    """Detect case-folding collisions among files being added.

    Instances are callable: invoke with each candidate filename.  Depending
    on the 'abort' flag a collision either raises error.Abort or emits a
    warning on ui.
    """
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort  # True: raise on collision, False: warn only
        # lowered form of every tracked filename, for O(1) collision checks
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a file already tracked under this exact name is not a collision
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
339 339
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    hidden = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not hidden:
        return None
    sha = hashlib.sha1()
    for rev in hidden:
        sha.update('%d;' % rev)
    return sha.digest()
363 363
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def onerr(err):
        # only an error on the root path itself is fatal
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def remember(dirlst, dirname):
            # record dirname's stat; return True only if not seen before
            st = os.stat(dirname)
            seen = any(samestat(st, prior) for prior in dirlst)
            if not seen:
                dirlst.append(st)
            return not seen
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if seen_dirs is None and followsym:
        seen_dirs = []
        remember(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=onerr):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            kept = []
            for d in dirs:
                fname = os.path.join(root, d)
                if remember(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        kept.append(d)
            dirs[:] = kept
407 407
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # the working directory has no real node; use the sentinel wdirid
    return wdirid if node is None else node
414 414
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # the working directory has no real rev; use the sentinel wdirrev
    return wdirrev if rev is None else rev
422 422
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
428 428
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hash in debug mode, short hash otherwise
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
436 436
def resolvepartialhexnodeid(repo, prefix):
    """Resolve a hex node id prefix to a full node, or None if unknown.

    Uses the unfiltered repo because it's faster when the prefix is
    ambiguous.  This matches the "shortest" template function.
    """
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
445 445
def isrevsymbol(repo, symbol):
    """Report whether revsymbol() would resolve the given symbol."""
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
452 452
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".

    Raises error.RepoLookupError when the symbol matches nothing.
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # the lookups below are tried in order of decreasing specificity:
        # reserved names, revision number, full hex node, registered names
        # (bookmarks/tags/branches), then hex node prefix
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            # reject things like "010" that int() accepts but that are not
            # canonical revision numbers
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            # full binary node id
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # last resort: a unique hex prefix of a node id
        node = repo.unfiltered().changelog._partialmatch(symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)
513 513
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        # a non-visibility filter (e.g. 'served'): generic message
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Check if the changeset is obsolete
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)

    # If the changeset is obsolete, enrich the message with the reason
    # that made this changeset not visible
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
538 538
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec to a context, falling back to 'default'."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    # a multi-revision spec resolves to its last member
    return repo[matched.last()]
547 547
def _pairspec(revspec):
    """True if revspec parses to a top-level range expression."""
    parsed = revsetlang.parse(revspec)
    return parsed and parsed[0] in ('range', 'rangepre', 'rangepost',
                                    'rangeall')
551 551
def revpairnodes(repo, revs):
    """Deprecated: like revpair() but returning nodes instead of contexts."""
    repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
    first, second = revpair(repo, revs)
    return first.node(), second.node()
556 556
def revpair(repo, revs):
    """Resolve a list of revision specs into a (ctx1, ctx2) pair.

    With no specs, returns (working parent, working directory).  When a
    single range spec resolves to one revision, the second context is the
    working directory.
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # pick the two endpoints of the smartset without forcing a full sort
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        # e.g. "-r foo -r bar" where one side resolved to nothing
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
586 586
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are shorthand for 'rev(N)'
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
614 614
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        # in debug mode always show both slots, padding with null
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx) - 1:
        # immediately preceding revision: implicit, not worth showing
        return []
    return ps
630 630
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind (e.g. 're:'): never glob-expand
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # no match on disk: keep the literal pattern
            expanded.append(kindpat)
    return expanded
649 649
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # closes over 'm', which is bound below before the matcher can
        # invoke this callback
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # a match-all matcher means no effective patterns
        pats = []
    return m, pats
674 674
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
679 679
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
683 683
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
687 687
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # plain path: canonicalize it directly
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # a real pattern must resolve to exactly one file in the revision
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
701 701
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    Returns the path the backup of ``filepath`` should be written to; may
    remove files or directories that conflict with that path.
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        # a directory left over from an earlier backup of a path that is
        # now a file: clear it out of the way
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
737 737
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        # bind the node->rev translation and the rev-based membership test
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))
747 747
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    # a single transaction keeps bookmark moves and obsmarkers/strip
    # consistent
    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            # obsolescence disabled: physically remove the replaced nodes
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
841 841
def addremove(repo, matcher, prefix, opts=None):
    """Add new files, remove missing files, and guess renames by similarity.

    Returns 1 if any matched file was rejected or a subrepo's addremove
    reported failure, 0 otherwise.  'dry_run' in opts suppresses the actual
    dirstate changes.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        # similarity is configured as a 0-100 percentage
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # recurse into subrepos first, when requested or matched explicitly
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only warn about explicitly-listed files; record all rejects
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
901 901
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # collect paths the matcher rejects; the list must exist before the
    # badfn closure below is ever invoked
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        for abs in sorted(unknownset | set(deleted)):
            if abs in unknownset:
                msg = _('adding %s\n') % abs
            else:
                msg = _('removing %s\n') % abs
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)
    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
930 930
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of paths:
    (added, unknown, deleted, removed, forgotten).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    # untracked candidates are vetted through the path auditor so that
    # suspicious paths (e.g. pointing outside the repo) are skipped
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # 'st' is the file's stat result, or None/False when it is gone from
    # disk; dirstate codes: '?' untracked, 'r' marked removed, 'a' marked
    # added
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
959 959
960 960 def _findrenames(repo, matcher, added, removed, similarity):
961 961 '''Find renames from removed files to added ones.'''
962 962 renames = {}
963 963 if similarity > 0:
964 964 for old, new, score in similar.findrenames(repo, added, removed,
965 965 similarity):
966 966 if (repo.ui.verbose or not matcher.exact(old)
967 967 or not matcher.exact(new)):
968 968 repo.ui.status(_('recording removal of %s as rename to %s '
969 969 '(%d%% similar)\n') %
970 970 (matcher.rel(old), matcher.rel(new),
971 971 score * 100))
972 972 renames[new] = old
973 973 return renames
974 974
975 975 def _markchanges(repo, unknown, deleted, renames):
976 976 '''Marks the files in unknown as added, the files in deleted as removed,
977 977 and the files in renames as copied.'''
978 978 wctx = repo[None]
979 979 with repo.wlock():
980 980 wctx.forget(deleted)
981 981 wctx.add(unknown)
982 982 for new, old in renames.iteritems():
983 983 wctx.copy(old, new)
984 984
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # if src was itself copied from somewhere, credit that original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # undo the copy marker: just make sure dst is tracked normally again
        # ('m' merged / 'n' normal states are left alone)
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the source has only been added, never committed, so there is
            # no revision to attach copy metadata to: warn and just add dst
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1003 1003
def readrequires(opener, supported):
    '''Read .hg/requires and verify that every entry is supported.

    Returns the set of requirement strings read from the file.  Raises
    RequirementError when the file is malformed or when it names features
    unknown to this Mercurial.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for requirement in requirements:
        if requirement in supported:
            continue
        # an empty or non-alphanumeric-leading entry means the file itself
        # is broken, not merely that a feature is unknown
        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(requirement)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1022 1022
def writerequires(opener, requirements):
    """Write the requirement strings, sorted and one per line, via *opener*."""
    content = ''.join('%s\n' % requirement
                      for requirement in sorted(requirements))
    with opener('requires', 'w') as fp:
        fp.write(content)
1027 1027
class filecachesubentry(object):
    """Stat-based change tracker for a single file path.

    Records the stat of *path* at creation/refresh time; changed() compares
    a fresh stat against the recorded one.  Files whose stat cannot reliably
    detect replacement are treated as always changed (see cacheable()).
    """
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only when stat comparison is meaningful for this file
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns a util.cachestat, or None (implicit) for a missing file
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1082 1082
class filecacheentry(object):
    """A bundle of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1099 1099
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell
    us if a file has been replaced. If it can't, we fallback to recreating
    the object on every call (essentially the same behavior as
    propertycache).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # used as a decorator: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # underlying file changed since last access: recompute
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # mirror normal attribute-deletion semantics: AttributeError if unset
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1178 1178
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        # each record is "<revspec>[ <value>]"; a missing value maps to ""
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child process and close the stream
        if proc:
            proc.communicate()
        if src:
            src.close()
    # surface command failure only after cleanup has completed
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1233 1233
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run *cmd* through repo.ui.system() while exposing an inherited *lock*.

    The lock token is published to the child process through the *envvar*
    environment variable.  Note that a caller-supplied *environ* dict is
    mutated in place.  Raises LockInheritanceContractViolation when *lock*
    is None (i.e. not currently held)."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1243 1243
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1252 1252
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    # (second option is consulted only when the first is false, preserving
    # short-circuit evaluation)
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1259 1259
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1265 1265
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    # reserved key under which read() can return the raw first line
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): the 'keys' argument is accepted but currently unused
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes the dict() constructor raise ValueError
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1334 1334
# transaction names whose summary (see registersummarycallback) should
# include an "obsoleted N changesets" line
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction names whose summary should report the range of new changesets
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1355 1355
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        # does this transaction's name belong to one of the given sources?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # category names are numbered so callbacks run in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # summarize how many changesets this transaction obsoleted
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (user-facing name, revset name) pairs for each instability type
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable revisions per type, ignoring filtered ones
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            # warn only about instabilities *introduced* by this transaction
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
1439 1439
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of *nodes* as space-separated short hashes.

    When there are more than *maxnumnodes* nodes and the ui is not verbose,
    the list is truncated and suffixed with "and N others"."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(node) for node in nodes)
    shown = ' '.join(short(node) for node in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1445 1445
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads

    Aborts the transaction (error.Abort) when any visible named branch has
    more than one head.  *desc* is the transaction name; strip/repair
    transactions are exempt."""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)
1460 1460
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # default implementation: hand the sink back untouched; extensions are
    # expected to wrap this function to interpose their own sink.
    return sink
1466 1466
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # the feature is gated behind experimental.directaccess and only makes
    # sense on a hidden-aware (filtered) repository
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # collect every hash-like symbol appearing in the revset specs
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until
    # we can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1509 1509
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            # plain integers are revision numbers; they are only honored
            # when experimental.directaccess.revnums is enabled
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # 'n not in cl' means the rev is filtered out of the
                    # current (visible) view, i.e. hidden
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # otherwise try to resolve the symbol as a (partial) hash on the
        # unfiltered changelog; unresolvable symbols are simply skipped
        try:
            s = pmatch(s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
@@ -1,146 +1,146 b''
1 1 #require serve
2 2
3 3 #testcases sshv1 sshv2
4 4
5 5 #if sshv2
6 6 $ cat >> $HGRCPATH << EOF
7 7 > [experimental]
8 8 > sshpeer.advertise-v2 = true
9 9 > sshserver.support-v2 = true
10 10 > EOF
11 11 #endif
12 12
13 13 $ hg init test
14 14 $ cd test
15 15
16 16 $ echo foo>foo
17 17 $ hg addremove
18 18 adding foo
19 19 $ hg commit -m 1
20 20
21 21 $ hg verify
22 22 checking changesets
23 23 checking manifests
24 24 crosschecking files in changesets and manifests
25 25 checking files
26 26 1 files, 1 changesets, 1 total revisions
27 27
28 28 $ hg serve -p $HGPORT -d --pid-file=hg.pid
29 29 $ cat hg.pid >> $DAEMON_PIDS
30 30 $ cd ..
31 31
32 32 $ hg clone --pull http://foo:bar@localhost:$HGPORT/ copy
33 33 requesting all changes
34 34 adding changesets
35 35 adding manifests
36 36 adding file changes
37 37 added 1 changesets with 1 changes to 1 files
38 38 new changesets 340e38bdcde4
39 39 updating to branch default
40 40 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 41
42 42 $ cd copy
43 43 $ hg verify
44 44 checking changesets
45 45 checking manifests
46 46 crosschecking files in changesets and manifests
47 47 checking files
48 48 1 files, 1 changesets, 1 total revisions
49 49
50 50 $ hg co
51 51 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
52 52 $ cat foo
53 53 foo
54 54
55 55 $ hg manifest --debug
56 56 2ed2a3912a0b24502043eae84ee4b279c18b90dd 644 foo
57 57
58 58 $ hg pull
59 59 pulling from http://foo@localhost:$HGPORT/
60 60 searching for changes
61 61 no changes found
62 62
63 63 $ hg rollback --dry-run --verbose
64 64 repository tip rolled back to revision -1 (undo pull: http://foo:***@localhost:$HGPORT/)
65 65
66 66 Test pull of non-existing 20 character revision specification, making sure plain ascii identifiers
67 67 not are encoded like a node:
68 68
69 69 $ hg pull -r 'xxxxxxxxxxxxxxxxxxxy'
70 70 pulling from http://foo@localhost:$HGPORT/
71 71 abort: unknown revision 'xxxxxxxxxxxxxxxxxxxy'!
72 72 [255]
73 73 $ hg pull -r 'xxxxxxxxxxxxxxxxxx y'
74 74 pulling from http://foo@localhost:$HGPORT/
75 abort: unknown revision '7878787878787878787878787878787878782079'!
75 abort: unknown revision 'xxxxxxxxxxxxxxxxxx y'!
76 76 [255]
77 77
78 78 Issue622: hg init && hg pull -u URL doesn't checkout default branch
79 79
80 80 $ cd ..
81 81 $ hg init empty
82 82 $ cd empty
83 83 $ hg pull -u ../test
84 84 pulling from ../test
85 85 requesting all changes
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 1 changesets with 1 changes to 1 files
90 90 new changesets 340e38bdcde4
91 91 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 92
93 93 Test 'file:' uri handling:
94 94
95 95 $ hg pull -q file://../test-does-not-exist
96 96 abort: file:// URLs can only refer to localhost
97 97 [255]
98 98
99 99 $ hg pull -q file://../test
100 100 abort: file:// URLs can only refer to localhost
101 101 [255]
102 102
103 103 MSYS changes 'file:' into 'file;'
104 104
105 105 #if no-msys
106 106 $ hg pull -q file:../test # no-msys
107 107 #endif
108 108
109 109 It's tricky to make file:// URLs working on every platform with
110 110 regular shell commands.
111 111
112 112 $ URL=`$PYTHON -c "import os; print 'file://foobar' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
113 113 $ hg pull -q "$URL"
114 114 abort: file:// URLs can only refer to localhost
115 115 [255]
116 116
117 117 $ URL=`$PYTHON -c "import os; print 'file://localhost' + ('/' + os.getcwd().replace(os.sep, '/')).replace('//', '/') + '/../test'"`
118 118 $ hg pull -q "$URL"
119 119
120 120 SEC: check for unsafe ssh url
121 121
122 122 $ cat >> $HGRCPATH << EOF
123 123 > [ui]
124 124 > ssh = sh -c "read l; read l; read l"
125 125 > EOF
126 126
127 127 $ hg pull 'ssh://-oProxyCommand=touch${IFS}owned/path'
128 128 pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
129 129 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
130 130 [255]
131 131 $ hg pull 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
132 132 pulling from ssh://-oProxyCommand%3Dtouch%24%7BIFS%7Downed/path
133 133 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
134 134 [255]
135 135 $ hg pull 'ssh://fakehost|touch${IFS}owned/path'
136 136 pulling from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
137 137 abort: no suitable response from remote hg!
138 138 [255]
139 139 $ hg pull 'ssh://fakehost%7Ctouch%20owned/path'
140 140 pulling from ssh://fakehost%7Ctouch%20owned/path
141 141 abort: no suitable response from remote hg!
142 142 [255]
143 143
144 144 $ [ ! -f owned ] || echo 'you got owned'
145 145
146 146 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now