py3: force bytestr conversion of "reason" in scmutil.callcatch()...
Denis Laxalde
r44774:b4c82b70 5.2.2 stable
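
The change below hardens the URLError/SSLError branch of scmutil.callcatch() for Python 3: the "reason" value extracted from the exception is not guaranteed to be bytes (it may, for instance, be a nested exception object), and interpolating a non-bytes value into the b"abort: error: %s\n" message raises a TypeError under Python 3 bytes formatting. A minimal sketch of the failure mode and the fix, using an illustrative reason value rather than one taken from the patch:

    from mercurial.utils import stringutil

    # hypothetical value for inst.reason when a connection fails
    reason = ConnectionRefusedError(111, 'Connection refused')

    # b"abort: error: %s\n" % reason                          # TypeError on Python 3
    b"abort: error: %s\n" % stringutil.forcebytestr(reason)   # coerced to bytes, formats cleanly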
@@ -1,2221 +1,2221 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29 from .pycompat import getattr
30 30
31 31 from . import (
32 32 copies as copiesmod,
33 33 encoding,
34 34 error,
35 35 match as matchmod,
36 36 obsolete,
37 37 obsutil,
38 38 pathutil,
39 39 phases,
40 40 policy,
41 41 pycompat,
42 42 revsetlang,
43 43 similar,
44 44 smartset,
45 45 url,
46 46 util,
47 47 vfs,
48 48 )
49 49
50 50 from .utils import (
51 51 procutil,
52 52 stringutil,
53 53 )
54 54
55 55 if pycompat.iswindows:
56 56 from . import scmwindows as scmplatform
57 57 else:
58 58 from . import scmposix as scmplatform
59 59
60 60 parsers = policy.importmod(r'parsers')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
65 65 class status(tuple):
66 66 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
67 67 and 'ignored' properties are only relevant to the working copy.
68 68 '''
69 69
70 70 __slots__ = ()
71 71
72 72 def __new__(
73 73 cls, modified, added, removed, deleted, unknown, ignored, clean
74 74 ):
75 75 return tuple.__new__(
76 76 cls, (modified, added, removed, deleted, unknown, ignored, clean)
77 77 )
78 78
79 79 @property
80 80 def modified(self):
81 81 '''files that have been modified'''
82 82 return self[0]
83 83
84 84 @property
85 85 def added(self):
86 86 '''files that have been added'''
87 87 return self[1]
88 88
89 89 @property
90 90 def removed(self):
91 91 '''files that have been removed'''
92 92 return self[2]
93 93
94 94 @property
95 95 def deleted(self):
96 96 '''files that are in the dirstate, but have been deleted from the
97 97 working copy (aka "missing")
98 98 '''
99 99 return self[3]
100 100
101 101 @property
102 102 def unknown(self):
103 103 '''files not in the dirstate that are not ignored'''
104 104 return self[4]
105 105
106 106 @property
107 107 def ignored(self):
108 108 '''files not in the dirstate that are ignored (by _dirignore())'''
109 109 return self[5]
110 110
111 111 @property
112 112 def clean(self):
113 113 '''files that have not been modified'''
114 114 return self[6]
115 115
116 116 def __repr__(self, *args, **kwargs):
117 117 return (
118 118 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
119 119 r'unknown=%s, ignored=%s, clean=%s>'
120 120 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
121 121
122 122
123 123 def itersubrepos(ctx1, ctx2):
124 124 """find subrepos in ctx1 or ctx2"""
125 125 # Create a (subpath, ctx) mapping where we prefer subpaths from
126 126 # ctx1. The subpaths from ctx2 are important when the .hgsub file
127 127 # has been modified (in ctx2) but not yet committed (in ctx1).
128 128 subpaths = dict.fromkeys(ctx2.substate, ctx2)
129 129 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
130 130
131 131 missing = set()
132 132
133 133 for subpath in ctx2.substate:
134 134 if subpath not in ctx1.substate:
135 135 del subpaths[subpath]
136 136 missing.add(subpath)
137 137
138 138 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
139 139 yield subpath, ctx.sub(subpath)
140 140
141 141 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
142 142 # status and diff will have an accurate result when it does
143 143 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
144 144 # against itself.
145 145 for subpath in missing:
146 146 yield subpath, ctx2.nullsub(subpath, ctx1)
147 147
148 148
149 149 def nochangesfound(ui, repo, excluded=None):
150 150 '''Report no changes for push/pull, excluded is None or a list of
151 151 nodes excluded from the push/pull.
152 152 '''
153 153 secretlist = []
154 154 if excluded:
155 155 for n in excluded:
156 156 ctx = repo[n]
157 157 if ctx.phase() >= phases.secret and not ctx.extinct():
158 158 secretlist.append(n)
159 159
160 160 if secretlist:
161 161 ui.status(
162 162 _(b"no changes found (ignored %d secret changesets)\n")
163 163 % len(secretlist)
164 164 )
165 165 else:
166 166 ui.status(_(b"no changes found\n"))
167 167
168 168
169 169 def callcatch(ui, func):
170 170 """call func() with global exception handling
171 171
172 172 return func() if no exception happens. otherwise do some error handling
173 173 and return an exit code accordingly. does not handle all exceptions.
174 174 """
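# A minimal usage sketch (illustrative; "runcommand" is a hypothetical callable):
#
#   ret = scmutil.callcatch(ui, lambda: runcommand(ui, repo))
#   sys.exit(ret or 0)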
175 175 try:
176 176 try:
177 177 return func()
178 178 except: # re-raises
179 179 ui.traceback()
180 180 raise
181 181 # Global exception handling, alphabetically
182 182 # Mercurial-specific first, followed by built-in and library exceptions
183 183 except error.LockHeld as inst:
184 184 if inst.errno == errno.ETIMEDOUT:
185 185 reason = _(b'timed out waiting for lock held by %r') % (
186 186 pycompat.bytestr(inst.locker)
187 187 )
188 188 else:
189 189 reason = _(b'lock held by %r') % inst.locker
190 190 ui.error(
191 191 _(b"abort: %s: %s\n")
192 192 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
193 193 )
194 194 if not inst.locker:
195 195 ui.error(_(b"(lock might be very busy)\n"))
196 196 except error.LockUnavailable as inst:
197 197 ui.error(
198 198 _(b"abort: could not lock %s: %s\n")
199 199 % (
200 200 inst.desc or stringutil.forcebytestr(inst.filename),
201 201 encoding.strtolocal(inst.strerror),
202 202 )
203 203 )
204 204 except error.OutOfBandError as inst:
205 205 if inst.args:
206 206 msg = _(b"abort: remote error:\n")
207 207 else:
208 208 msg = _(b"abort: remote error\n")
209 209 ui.error(msg)
210 210 if inst.args:
211 211 ui.error(b''.join(inst.args))
212 212 if inst.hint:
213 213 ui.error(b'(%s)\n' % inst.hint)
214 214 except error.RepoError as inst:
215 215 ui.error(_(b"abort: %s!\n") % inst)
216 216 if inst.hint:
217 217 ui.error(_(b"(%s)\n") % inst.hint)
218 218 except error.ResponseError as inst:
219 219 ui.error(_(b"abort: %s") % inst.args[0])
220 220 msg = inst.args[1]
221 221 if isinstance(msg, type(u'')):
222 222 msg = pycompat.sysbytes(msg)
223 223 if not isinstance(msg, bytes):
224 224 ui.error(b" %r\n" % (msg,))
225 225 elif not msg:
226 226 ui.error(_(b" empty string\n"))
227 227 else:
228 228 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
229 229 except error.CensoredNodeError as inst:
230 230 ui.error(_(b"abort: file censored %s!\n") % inst)
231 231 except error.StorageError as inst:
232 232 ui.error(_(b"abort: %s!\n") % inst)
233 233 if inst.hint:
234 234 ui.error(_(b"(%s)\n") % inst.hint)
235 235 except error.InterventionRequired as inst:
236 236 ui.error(b"%s\n" % inst)
237 237 if inst.hint:
238 238 ui.error(_(b"(%s)\n") % inst.hint)
239 239 return 1
240 240 except error.WdirUnsupported:
241 241 ui.error(_(b"abort: working directory revision cannot be specified\n"))
242 242 except error.Abort as inst:
243 243 ui.error(_(b"abort: %s\n") % inst)
244 244 if inst.hint:
245 245 ui.error(_(b"(%s)\n") % inst.hint)
246 246 except ImportError as inst:
247 247 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
248 248 m = stringutil.forcebytestr(inst).split()[-1]
249 249 if m in b"mpatch bdiff".split():
250 250 ui.error(_(b"(did you forget to compile extensions?)\n"))
251 251 elif m in b"zlib".split():
252 252 ui.error(_(b"(is your Python install correct?)\n"))
253 253 except (IOError, OSError) as inst:
254 254 if util.safehasattr(inst, b"code"): # HTTPError
255 255 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
256 256 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
257 257 try: # usually it is in the form (errno, strerror)
258 258 reason = inst.reason.args[1]
259 259 except (AttributeError, IndexError):
260 260 # it might be anything, for example a string
261 261 reason = inst.reason
262 262 if isinstance(reason, pycompat.unicode):
263 263 # SSLError of Python 2.7.9 contains a unicode
264 264 reason = encoding.unitolocal(reason)
265 ui.error(_(b"abort: error: %s\n") % reason)
265 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
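# On Python 3 "reason" may still not be bytes at this point (it can be a
# nested exception object), so it is coerced with forcebytestr() before the
# b"%s" interpolation above to avoid a TypeError.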
266 266 elif (
267 267 util.safehasattr(inst, b"args")
268 268 and inst.args
269 269 and inst.args[0] == errno.EPIPE
270 270 ):
271 271 pass
272 272 elif getattr(inst, "strerror", None): # common IOError or OSError
273 273 if getattr(inst, "filename", None) is not None:
274 274 ui.error(
275 275 _(b"abort: %s: '%s'\n")
276 276 % (
277 277 encoding.strtolocal(inst.strerror),
278 278 stringutil.forcebytestr(inst.filename),
279 279 )
280 280 )
281 281 else:
282 282 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
283 283 else: # suspicious IOError
284 284 raise
285 285 except MemoryError:
286 286 ui.error(_(b"abort: out of memory\n"))
287 287 except SystemExit as inst:
288 288 # Commands shouldn't sys.exit directly, but give a return code.
289 289 # Just in case catch this and pass exit code to caller.
290 290 return inst.code
291 291
292 292 return -1
293 293
294 294
295 295 def checknewlabel(repo, lbl, kind):
296 296 # Do not use the "kind" parameter in ui output.
297 297 # It makes strings difficult to translate.
298 298 if lbl in [b'tip', b'.', b'null']:
299 299 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
300 300 for c in (b':', b'\0', b'\n', b'\r'):
301 301 if c in lbl:
302 302 raise error.Abort(
303 303 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
304 304 )
305 305 try:
306 306 int(lbl)
307 307 raise error.Abort(_(b"cannot use an integer as a name"))
308 308 except ValueError:
309 309 pass
310 310 if lbl.strip() != lbl:
311 311 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
312 312
313 313
314 314 def checkfilename(f):
315 315 '''Check that the filename f is an acceptable filename for a tracked file'''
316 316 if b'\r' in f or b'\n' in f:
317 317 raise error.Abort(
318 318 _(b"'\\n' and '\\r' disallowed in filenames: %r")
319 319 % pycompat.bytestr(f)
320 320 )
321 321
322 322
323 323 def checkportable(ui, f):
324 324 '''Check if filename f is portable and warn or abort depending on config'''
325 325 checkfilename(f)
326 326 abort, warn = checkportabilityalert(ui)
327 327 if abort or warn:
328 328 msg = util.checkwinfilename(f)
329 329 if msg:
330 330 msg = b"%s: %s" % (msg, procutil.shellquote(f))
331 331 if abort:
332 332 raise error.Abort(msg)
333 333 ui.warn(_(b"warning: %s\n") % msg)
334 334
335 335
336 336 def checkportabilityalert(ui):
337 337 '''check if the user's config requests nothing, a warning, or abort for
338 338 non-portable filenames'''
339 339 val = ui.config(b'ui', b'portablefilenames')
340 340 lval = val.lower()
341 341 bval = stringutil.parsebool(val)
342 342 abort = pycompat.iswindows or lval == b'abort'
343 343 warn = bval or lval == b'warn'
344 344 if bval is None and not (warn or abort or lval == b'ignore'):
345 345 raise error.ConfigError(
346 346 _(b"ui.portablefilenames value is invalid ('%s')") % val
347 347 )
348 348 return abort, warn
349 349
350 350
351 351 class casecollisionauditor(object):
352 352 def __init__(self, ui, abort, dirstate):
353 353 self._ui = ui
354 354 self._abort = abort
355 355 allfiles = b'\0'.join(dirstate)
356 356 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
357 357 self._dirstate = dirstate
358 358 # The purpose of _newfiles is so that we don't complain about
359 359 # case collisions if someone were to call this object with the
360 360 # same filename twice.
361 361 self._newfiles = set()
362 362
363 363 def __call__(self, f):
364 364 if f in self._newfiles:
365 365 return
366 366 fl = encoding.lower(f)
367 367 if fl in self._loweredfiles and f not in self._dirstate:
368 368 msg = _(b'possible case-folding collision for %s') % f
369 369 if self._abort:
370 370 raise error.Abort(msg)
371 371 self._ui.warn(_(b"warning: %s\n") % msg)
372 372 self._loweredfiles.add(fl)
373 373 self._newfiles.add(f)
374 374
375 375
376 376 def filteredhash(repo, maxrev):
377 377 """build hash of filtered revisions in the current repoview.
378 378
379 379 Multiple caches perform up-to-date validation by checking that the
380 380 tiprev and tipnode stored in the cache file match the current repository.
381 381 However, this is not sufficient for validating repoviews because the set
382 382 of revisions in the view may change without the repository tiprev and
383 383 tipnode changing.
384 384
385 385 This function hashes all the revs filtered from the view and returns
386 386 that SHA-1 digest.
387 387 """
388 388 cl = repo.changelog
389 389 if not cl.filteredrevs:
390 390 return None
391 391 key = None
392 392 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
393 393 if revs:
394 394 s = hashlib.sha1()
395 395 for rev in revs:
396 396 s.update(b'%d;' % rev)
397 397 key = s.digest()
398 398 return key
399 399
400 400
401 401 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
402 402 '''yield every hg repository under path, always recursively.
403 403 The recurse flag will only control recursion into repo working dirs'''
404 404
405 405 def errhandler(err):
406 406 if err.filename == path:
407 407 raise err
408 408
409 409 samestat = getattr(os.path, 'samestat', None)
410 410 if followsym and samestat is not None:
411 411
412 412 def adddir(dirlst, dirname):
413 413 dirstat = os.stat(dirname)
414 414 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
415 415 if not match:
416 416 dirlst.append(dirstat)
417 417 return not match
418 418
419 419 else:
420 420 followsym = False
421 421
422 422 if (seen_dirs is None) and followsym:
423 423 seen_dirs = []
424 424 adddir(seen_dirs, path)
425 425 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
426 426 dirs.sort()
427 427 if b'.hg' in dirs:
428 428 yield root # found a repository
429 429 qroot = os.path.join(root, b'.hg', b'patches')
430 430 if os.path.isdir(os.path.join(qroot, b'.hg')):
431 431 yield qroot # we have a patch queue repo here
432 432 if recurse:
433 433 # avoid recursing inside the .hg directory
434 434 dirs.remove(b'.hg')
435 435 else:
436 436 dirs[:] = [] # don't descend further
437 437 elif followsym:
438 438 newdirs = []
439 439 for d in dirs:
440 440 fname = os.path.join(root, d)
441 441 if adddir(seen_dirs, fname):
442 442 if os.path.islink(fname):
443 443 for hgname in walkrepos(fname, True, seen_dirs):
444 444 yield hgname
445 445 else:
446 446 newdirs.append(d)
447 447 dirs[:] = newdirs
448 448
449 449
450 450 def binnode(ctx):
451 451 """Return binary node id for a given basectx"""
452 452 node = ctx.node()
453 453 if node is None:
454 454 return wdirid
455 455 return node
456 456
457 457
458 458 def intrev(ctx):
459 459 """Return integer for a given basectx that can be used in comparison or
460 460 arithmetic operation"""
461 461 rev = ctx.rev()
462 462 if rev is None:
463 463 return wdirrev
464 464 return rev
465 465
466 466
467 467 def formatchangeid(ctx):
468 468 """Format changectx as '{rev}:{node|formatnode}', which is the default
469 469 template provided by logcmdutil.changesettemplater"""
470 470 repo = ctx.repo()
471 471 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
472 472
473 473
474 474 def formatrevnode(ui, rev, node):
475 475 """Format given revision and node depending on the current verbosity"""
476 476 if ui.debugflag:
477 477 hexfunc = hex
478 478 else:
479 479 hexfunc = short
480 480 return b'%d:%s' % (rev, hexfunc(node))
481 481
482 482
483 483 def resolvehexnodeidprefix(repo, prefix):
484 484 if prefix.startswith(b'x') and repo.ui.configbool(
485 485 b'experimental', b'revisions.prefixhexnode'
486 486 ):
487 487 prefix = prefix[1:]
488 488 try:
489 489 # Uses unfiltered repo because it's faster when prefix is ambiguous.
490 490 # This matches the shortesthexnodeidprefix() function below.
491 491 node = repo.unfiltered().changelog._partialmatch(prefix)
492 492 except error.AmbiguousPrefixLookupError:
493 493 revset = repo.ui.config(
494 494 b'experimental', b'revisions.disambiguatewithin'
495 495 )
496 496 if revset:
497 497 # Clear config to avoid infinite recursion
498 498 configoverrides = {
499 499 (b'experimental', b'revisions.disambiguatewithin'): None
500 500 }
501 501 with repo.ui.configoverride(configoverrides):
502 502 revs = repo.anyrevs([revset], user=True)
503 503 matches = []
504 504 for rev in revs:
505 505 node = repo.changelog.node(rev)
506 506 if hex(node).startswith(prefix):
507 507 matches.append(node)
508 508 if len(matches) == 1:
509 509 return matches[0]
510 510 raise
511 511 if node is None:
512 512 return
513 513 repo.changelog.rev(node) # make sure node isn't filtered
514 514 return node
515 515
516 516
517 517 def mayberevnum(repo, prefix):
518 518 """Checks if the given prefix may be mistaken for a revision number"""
519 519 try:
520 520 i = int(prefix)
521 521 # if we are a pure int, then starting with zero will not be
522 522 # confused as a rev; or, obviously, if the int is larger
523 523 # than the value of the tip rev. We still need to disambiguate if
524 524 # prefix == '0', since that *is* a valid revnum.
525 525 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
526 526 return False
527 527 return True
528 528 except ValueError:
529 529 return False
530 530
531 531
532 532 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
533 533 """Find the shortest unambiguous prefix that matches hexnode.
534 534
535 535 If "cache" is not None, it must be a dictionary that can be used for
536 536 caching between calls to this method.
537 537 """
538 538 # _partialmatch() of filtered changelog could take O(len(repo)) time,
539 539 # which would be unacceptably slow. so we look for hash collision in
540 540 # unfiltered space, which means some hashes may be slightly longer.
541 541
542 542 minlength = max(minlength, 1)
543 543
544 544 def disambiguate(prefix):
545 545 """Disambiguate against revnums."""
546 546 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
547 547 if mayberevnum(repo, prefix):
548 548 return b'x' + prefix
549 549 else:
550 550 return prefix
551 551
552 552 hexnode = hex(node)
553 553 for length in range(len(prefix), len(hexnode) + 1):
554 554 prefix = hexnode[:length]
555 555 if not mayberevnum(repo, prefix):
556 556 return prefix
557 557
558 558 cl = repo.unfiltered().changelog
559 559 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
560 560 if revset:
561 561 revs = None
562 562 if cache is not None:
563 563 revs = cache.get(b'disambiguationrevset')
564 564 if revs is None:
565 565 revs = repo.anyrevs([revset], user=True)
566 566 if cache is not None:
567 567 cache[b'disambiguationrevset'] = revs
568 568 if cl.rev(node) in revs:
569 569 hexnode = hex(node)
570 570 nodetree = None
571 571 if cache is not None:
572 572 nodetree = cache.get(b'disambiguationnodetree')
573 573 if not nodetree:
574 574 try:
575 575 nodetree = parsers.nodetree(cl.index, len(revs))
576 576 except AttributeError:
577 577 # no native nodetree
578 578 pass
579 579 else:
580 580 for r in revs:
581 581 nodetree.insert(r)
582 582 if cache is not None:
583 583 cache[b'disambiguationnodetree'] = nodetree
584 584 if nodetree is not None:
585 585 length = max(nodetree.shortest(node), minlength)
586 586 prefix = hexnode[:length]
587 587 return disambiguate(prefix)
588 588 for length in range(minlength, len(hexnode) + 1):
589 589 matches = []
590 590 prefix = hexnode[:length]
591 591 for rev in revs:
592 592 otherhexnode = repo[rev].hex()
593 593 if prefix == otherhexnode[:length]:
594 594 matches.append(otherhexnode)
595 595 if len(matches) == 1:
596 596 return disambiguate(prefix)
597 597
598 598 try:
599 599 return disambiguate(cl.shortest(node, minlength))
600 600 except error.LookupError:
601 601 raise error.RepoLookupError()
602 602
603 603
604 604 def isrevsymbol(repo, symbol):
605 605 """Checks if a symbol exists in the repo.
606 606
607 607 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
608 608 symbol is an ambiguous nodeid prefix.
609 609 """
610 610 try:
611 611 revsymbol(repo, symbol)
612 612 return True
613 613 except error.RepoLookupError:
614 614 return False
615 615
616 616
617 617 def revsymbol(repo, symbol):
618 618 """Returns a context given a single revision symbol (as string).
619 619
620 620 This is similar to revsingle(), but accepts only a single revision symbol,
621 621 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
622 622 not "max(public())".
623 623 """
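# e.g. revsymbol(repo, b'.'), revsymbol(repo, b'tip') or revsymbol(repo, b'cafebabe12')
# each return a changectx; expressions like b'max(public())' belong in
# revsingle()/revrange() instead. (The hash prefix above is illustrative.)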
624 624 if not isinstance(symbol, bytes):
625 625 msg = (
626 626 b"symbol (%s of type %s) was not a string, did you mean "
627 627 b"repo[symbol]?" % (symbol, type(symbol))
628 628 )
629 629 raise error.ProgrammingError(msg)
630 630 try:
631 631 if symbol in (b'.', b'tip', b'null'):
632 632 return repo[symbol]
633 633
634 634 try:
635 635 r = int(symbol)
636 636 if b'%d' % r != symbol:
637 637 raise ValueError
638 638 l = len(repo.changelog)
639 639 if r < 0:
640 640 r += l
641 641 if r < 0 or r >= l and r != wdirrev:
642 642 raise ValueError
643 643 return repo[r]
644 644 except error.FilteredIndexError:
645 645 raise
646 646 except (ValueError, OverflowError, IndexError):
647 647 pass
648 648
649 649 if len(symbol) == 40:
650 650 try:
651 651 node = bin(symbol)
652 652 rev = repo.changelog.rev(node)
653 653 return repo[rev]
654 654 except error.FilteredLookupError:
655 655 raise
656 656 except (TypeError, LookupError):
657 657 pass
658 658
659 659 # look up bookmarks through the name interface
660 660 try:
661 661 node = repo.names.singlenode(repo, symbol)
662 662 rev = repo.changelog.rev(node)
663 663 return repo[rev]
664 664 except KeyError:
665 665 pass
666 666
667 667 node = resolvehexnodeidprefix(repo, symbol)
668 668 if node is not None:
669 669 rev = repo.changelog.rev(node)
670 670 return repo[rev]
671 671
672 672 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
673 673
674 674 except error.WdirUnsupported:
675 675 return repo[None]
676 676 except (
677 677 error.FilteredIndexError,
678 678 error.FilteredLookupError,
679 679 error.FilteredRepoLookupError,
680 680 ):
681 681 raise _filterederror(repo, symbol)
682 682
683 683
684 684 def _filterederror(repo, changeid):
685 685 """build an exception to be raised about a filtered changeid
686 686
687 687 This is extracted in a function to help extensions (eg: evolve) to
688 688 experiment with various message variants."""
689 689 if repo.filtername.startswith(b'visible'):
690 690
691 691 # Check if the changeset is obsolete
692 692 unfilteredrepo = repo.unfiltered()
693 693 ctx = revsymbol(unfilteredrepo, changeid)
694 694
695 695 # If the changeset is obsolete, enrich the message with the reason
696 696 # that made this changeset not visible
697 697 if ctx.obsolete():
698 698 msg = obsutil._getfilteredreason(repo, changeid, ctx)
699 699 else:
700 700 msg = _(b"hidden revision '%s'") % changeid
701 701
702 702 hint = _(b'use --hidden to access hidden revisions')
703 703
704 704 return error.FilteredRepoLookupError(msg, hint=hint)
705 705 msg = _(b"filtered revision '%s' (not in '%s' subset)")
706 706 msg %= (changeid, repo.filtername)
707 707 return error.FilteredRepoLookupError(msg)
708 708
709 709
710 710 def revsingle(repo, revspec, default=b'.', localalias=None):
711 711 if not revspec and revspec != 0:
712 712 return repo[default]
713 713
714 714 l = revrange(repo, [revspec], localalias=localalias)
715 715 if not l:
716 716 raise error.Abort(_(b'empty revision set'))
717 717 return repo[l.last()]
718 718
719 719
720 720 def _pairspec(revspec):
721 721 tree = revsetlang.parse(revspec)
722 722 return tree and tree[0] in (
723 723 b'range',
724 724 b'rangepre',
725 725 b'rangepost',
726 726 b'rangeall',
727 727 )
728 728
729 729
730 730 def revpair(repo, revs):
731 731 if not revs:
732 732 return repo[b'.'], repo[None]
733 733
734 734 l = revrange(repo, revs)
735 735
736 736 if not l:
737 737 raise error.Abort(_(b'empty revision range'))
738 738
739 739 first = l.first()
740 740 second = l.last()
741 741
742 742 if (
743 743 first == second
744 744 and len(revs) >= 2
745 745 and not all(revrange(repo, [r]) for r in revs)
746 746 ):
747 747 raise error.Abort(_(b'empty revision on one side of range'))
748 748
749 749 # if top-level is range expression, the result must always be a pair
750 750 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
751 751 return repo[first], repo[None]
752 752
753 753 return repo[first], repo[second]
754 754
755 755
756 756 def revrange(repo, specs, localalias=None):
757 757 """Execute 1 to many revsets and return the union.
758 758
759 759 This is the preferred mechanism for executing revsets using user-specified
760 760 config options, such as revset aliases.
761 761
762 762 The revsets specified by ``specs`` will be executed via a chained ``OR``
763 763 expression. If ``specs`` is empty, an empty result is returned.
764 764
765 765 ``specs`` can contain integers, in which case they are assumed to be
766 766 revision numbers.
767 767
768 768 It is assumed the revsets are already formatted. If you have arguments
769 769 that need to be expanded in the revset, call ``revsetlang.formatspec()``
770 770 and pass the result as an element of ``specs``.
771 771
772 772 Specifying a single revset is allowed.
773 773
774 774 Returns a ``revset.abstractsmartset`` which is a list-like interface over
775 775 integer revisions.
776 776 """
777 777 allspecs = []
778 778 for spec in specs:
779 779 if isinstance(spec, int):
780 780 spec = revsetlang.formatspec(b'%d', spec)
781 781 allspecs.append(spec)
782 782 return repo.anyrevs(allspecs, user=True, localalias=localalias)
783 783
784 784
785 785 def meaningfulparents(repo, ctx):
786 786 """Return list of meaningful (or all if debug) parentrevs for rev.
787 787
788 788 For merges (two non-nullrev revisions) both parents are meaningful.
789 789 Otherwise the first parent revision is considered meaningful if it
790 790 is not the preceding revision.
791 791 """
792 792 parents = ctx.parents()
793 793 if len(parents) > 1:
794 794 return parents
795 795 if repo.ui.debugflag:
796 796 return [parents[0], repo[nullrev]]
797 797 if parents[0].rev() >= intrev(ctx) - 1:
798 798 return []
799 799 return parents
800 800
801 801
802 802 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
803 803 """Return a function that produces paths for presenting to the user.
804 804
805 805 The returned function takes a repo-relative path and produces a path
806 806 that can be presented in the UI.
807 807
808 808 Depending on the value of ui.relative-paths, either a repo-relative or
809 809 cwd-relative path will be produced.
810 810
811 811 legacyrelativevalue is the value to use if ui.relative-paths=legacy
812 812
813 813 If forcerelativevalue is not None, then that value will be used regardless
814 814 of what ui.relative-paths is set to.
815 815 """
816 816 if forcerelativevalue is not None:
817 817 relative = forcerelativevalue
818 818 else:
819 819 config = repo.ui.config(b'ui', b'relative-paths')
820 820 if config == b'legacy':
821 821 relative = legacyrelativevalue
822 822 else:
823 823 relative = stringutil.parsebool(config)
824 824 if relative is None:
825 825 raise error.ConfigError(
826 826 _(b"ui.relative-paths is not a boolean ('%s')") % config
827 827 )
828 828
829 829 if relative:
830 830 cwd = repo.getcwd()
831 831 pathto = repo.pathto
832 832 return lambda f: pathto(f, cwd)
833 833 elif repo.ui.configbool(b'ui', b'slash'):
834 834 return lambda f: f
835 835 else:
836 836 return util.localpath
837 837
838 838
839 839 def subdiruipathfn(subpath, uipathfn):
840 840 '''Create a new uipathfn that treats the file as relative to subpath.'''
841 841 return lambda f: uipathfn(posixpath.join(subpath, f))
842 842
843 843
844 844 def anypats(pats, opts):
845 845 '''Checks if any patterns, including --include and --exclude were given.
846 846
847 847 Some commands (e.g. addremove) use this condition for deciding whether to
848 848 print absolute or relative paths.
849 849 '''
850 850 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
851 851
852 852
853 853 def expandpats(pats):
854 854 '''Expand bare globs when running on windows.
855 855 On posix we assume it has already been done by sh.'''
856 856 if not util.expandglobs:
857 857 return list(pats)
858 858 ret = []
859 859 for kindpat in pats:
860 860 kind, pat = matchmod._patsplit(kindpat, None)
861 861 if kind is None:
862 862 try:
863 863 globbed = glob.glob(pat)
864 864 except re.error:
865 865 globbed = [pat]
866 866 if globbed:
867 867 ret.extend(globbed)
868 868 continue
869 869 ret.append(kindpat)
870 870 return ret
871 871
872 872
873 873 def matchandpats(
874 874 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
875 875 ):
876 876 '''Return a matcher and the patterns that were used.
877 877 The matcher will warn about bad matches, unless an alternate badfn callback
878 878 is provided.'''
879 879 if opts is None:
880 880 opts = {}
881 881 if not globbed and default == b'relpath':
882 882 pats = expandpats(pats or [])
883 883
884 884 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
885 885
886 886 def bad(f, msg):
887 887 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
888 888
889 889 if badfn is None:
890 890 badfn = bad
891 891
892 892 m = ctx.match(
893 893 pats,
894 894 opts.get(b'include'),
895 895 opts.get(b'exclude'),
896 896 default,
897 897 listsubrepos=opts.get(b'subrepos'),
898 898 badfn=badfn,
899 899 )
900 900
901 901 if m.always():
902 902 pats = []
903 903 return m, pats
904 904
905 905
906 906 def match(
907 907 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
908 908 ):
909 909 '''Return a matcher that will warn about bad matches.'''
910 910 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
911 911
912 912
913 913 def matchall(repo):
914 914 '''Return a matcher that will efficiently match everything.'''
915 915 return matchmod.always()
916 916
917 917
918 918 def matchfiles(repo, files, badfn=None):
919 919 '''Return a matcher that will efficiently match exactly these files.'''
920 920 return matchmod.exact(files, badfn=badfn)
921 921
922 922
923 923 def parsefollowlinespattern(repo, rev, pat, msg):
924 924 """Return a file name from `pat` pattern suitable for usage in followlines
925 925 logic.
926 926 """
927 927 if not matchmod.patkind(pat):
928 928 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
929 929 else:
930 930 ctx = repo[rev]
931 931 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
932 932 files = [f for f in ctx if m(f)]
933 933 if len(files) != 1:
934 934 raise error.ParseError(msg)
935 935 return files[0]
936 936
937 937
938 938 def getorigvfs(ui, repo):
939 939 """return a vfs suitable to save 'orig' file
940 940
941 941 return None if no special directory is configured"""
942 942 origbackuppath = ui.config(b'ui', b'origbackuppath')
943 943 if not origbackuppath:
944 944 return None
945 945 return vfs.vfs(repo.wvfs.join(origbackuppath))
946 946
947 947
948 948 def backuppath(ui, repo, filepath):
949 949 '''customize where working copy backup files (.orig files) are created
950 950
951 951 Fetch user defined path from config file: [ui] origbackuppath = <path>
952 952 Fall back to default (filepath with .orig suffix) if not specified
953 953
954 954 filepath is repo-relative
955 955
956 956 Returns an absolute path
957 957 '''
958 958 origvfs = getorigvfs(ui, repo)
959 959 if origvfs is None:
960 960 return repo.wjoin(filepath + b".orig")
961 961
962 962 origbackupdir = origvfs.dirname(filepath)
963 963 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
964 964 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
965 965
966 966 # Remove any files that conflict with the backup file's path
967 967 for f in reversed(list(util.finddirs(filepath))):
968 968 if origvfs.isfileorlink(f):
969 969 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
970 970 origvfs.unlink(f)
971 971 break
972 972
973 973 origvfs.makedirs(origbackupdir)
974 974
975 975 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
976 976 ui.note(
977 977 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
978 978 )
979 979 origvfs.rmtree(filepath, forcibly=True)
980 980
981 981 return origvfs.join(filepath)
982 982
983 983
984 984 class _containsnode(object):
985 985 """proxy __contains__(node) to container.__contains__ which accepts revs"""
986 986
987 987 def __init__(self, repo, revcontainer):
988 988 self._torev = repo.changelog.rev
989 989 self._revcontains = revcontainer.__contains__
990 990
991 991 def __contains__(self, node):
992 992 return self._revcontains(self._torev(node))
993 993
994 994
995 995 def cleanupnodes(
996 996 repo,
997 997 replacements,
998 998 operation,
999 999 moves=None,
1000 1000 metadata=None,
1001 1001 fixphase=False,
1002 1002 targetphase=None,
1003 1003 backup=True,
1004 1004 ):
1005 1005 """do common cleanups when old nodes are replaced by new nodes
1006 1006
1007 1007 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1008 1008 (we might also want to move working directory parent in the future)
1009 1009
1010 1010 By default, bookmark moves are calculated automatically from 'replacements',
1011 1011 but 'moves' can be used to override that. Also, 'moves' may include
1012 1012 additional bookmark moves that should not have associated obsmarkers.
1013 1013
1014 1014 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1015 1015 have replacements. operation is a string, like "rebase".
1016 1016
1017 1017 metadata is a dictionary containing metadata to be stored in obsmarker if
1018 1018 obsolescence is enabled.
1019 1019 """
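# Illustrative shapes for "replacements" (nodes are placeholders):
#   {(oldnode,): (newnode,)}   - oldnode rewritten as newnode
#   {(oldnode,): ()}           - oldnode pruned without a successor
#   [oldnode1, oldnode2]       - bare iterable, same as pruning without successors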
1020 1020 assert fixphase or targetphase is None
1021 1021 if not replacements and not moves:
1022 1022 return
1023 1023
1024 1024 # translate mapping's other forms
1025 1025 if not util.safehasattr(replacements, b'items'):
1026 1026 replacements = {(n,): () for n in replacements}
1027 1027 else:
1028 1028 # upgrading non-tuple "source" to tuple ones for BC
1029 1029 repls = {}
1030 1030 for key, value in replacements.items():
1031 1031 if not isinstance(key, tuple):
1032 1032 key = (key,)
1033 1033 repls[key] = value
1034 1034 replacements = repls
1035 1035
1036 1036 # Unfiltered repo is needed since nodes in replacements might be hidden.
1037 1037 unfi = repo.unfiltered()
1038 1038
1039 1039 # Calculate bookmark movements
1040 1040 if moves is None:
1041 1041 moves = {}
1042 1042 for oldnodes, newnodes in replacements.items():
1043 1043 for oldnode in oldnodes:
1044 1044 if oldnode in moves:
1045 1045 continue
1046 1046 if len(newnodes) > 1:
1047 1047 # usually a split, take the one with biggest rev number
1048 1048 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1049 1049 elif len(newnodes) == 0:
1050 1050 # move bookmark backwards
1051 1051 allreplaced = []
1052 1052 for rep in replacements:
1053 1053 allreplaced.extend(rep)
1054 1054 roots = list(
1055 1055 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1056 1056 )
1057 1057 if roots:
1058 1058 newnode = roots[0].node()
1059 1059 else:
1060 1060 newnode = nullid
1061 1061 else:
1062 1062 newnode = newnodes[0]
1063 1063 moves[oldnode] = newnode
1064 1064
1065 1065 allnewnodes = [n for ns in replacements.values() for n in ns]
1066 1066 toretract = {}
1067 1067 toadvance = {}
1068 1068 if fixphase:
1069 1069 precursors = {}
1070 1070 for oldnodes, newnodes in replacements.items():
1071 1071 for oldnode in oldnodes:
1072 1072 for newnode in newnodes:
1073 1073 precursors.setdefault(newnode, []).append(oldnode)
1074 1074
1075 1075 allnewnodes.sort(key=lambda n: unfi[n].rev())
1076 1076 newphases = {}
1077 1077
1078 1078 def phase(ctx):
1079 1079 return newphases.get(ctx.node(), ctx.phase())
1080 1080
1081 1081 for newnode in allnewnodes:
1082 1082 ctx = unfi[newnode]
1083 1083 parentphase = max(phase(p) for p in ctx.parents())
1084 1084 if targetphase is None:
1085 1085 oldphase = max(
1086 1086 unfi[oldnode].phase() for oldnode in precursors[newnode]
1087 1087 )
1088 1088 newphase = max(oldphase, parentphase)
1089 1089 else:
1090 1090 newphase = max(targetphase, parentphase)
1091 1091 newphases[newnode] = newphase
1092 1092 if newphase > ctx.phase():
1093 1093 toretract.setdefault(newphase, []).append(newnode)
1094 1094 elif newphase < ctx.phase():
1095 1095 toadvance.setdefault(newphase, []).append(newnode)
1096 1096
1097 1097 with repo.transaction(b'cleanup') as tr:
1098 1098 # Move bookmarks
1099 1099 bmarks = repo._bookmarks
1100 1100 bmarkchanges = []
1101 1101 for oldnode, newnode in moves.items():
1102 1102 oldbmarks = repo.nodebookmarks(oldnode)
1103 1103 if not oldbmarks:
1104 1104 continue
1105 1105 from . import bookmarks # avoid import cycle
1106 1106
1107 1107 repo.ui.debug(
1108 1108 b'moving bookmarks %r from %s to %s\n'
1109 1109 % (
1110 1110 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1111 1111 hex(oldnode),
1112 1112 hex(newnode),
1113 1113 )
1114 1114 )
1115 1115 # Delete divergent bookmarks being parents of related newnodes
1116 1116 deleterevs = repo.revs(
1117 1117 b'parents(roots(%ln & (::%n))) - parents(%n)',
1118 1118 allnewnodes,
1119 1119 newnode,
1120 1120 oldnode,
1121 1121 )
1122 1122 deletenodes = _containsnode(repo, deleterevs)
1123 1123 for name in oldbmarks:
1124 1124 bmarkchanges.append((name, newnode))
1125 1125 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1126 1126 bmarkchanges.append((b, None))
1127 1127
1128 1128 if bmarkchanges:
1129 1129 bmarks.applychanges(repo, tr, bmarkchanges)
1130 1130
1131 1131 for phase, nodes in toretract.items():
1132 1132 phases.retractboundary(repo, tr, phase, nodes)
1133 1133 for phase, nodes in toadvance.items():
1134 1134 phases.advanceboundary(repo, tr, phase, nodes)
1135 1135
1136 1136 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1137 1137 # Obsolete or strip nodes
1138 1138 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1139 1139 # If a node is already obsoleted, and we want to obsolete it
1140 1140 # without a successor, skip that obssolete request since it's
1141 1141 # without a successor, skip that obsolete request since it's
1142 1142 # Also sort the node in topology order, that might be useful for
1143 1143 # some obsstore logic.
1144 1144 # NOTE: the sorting might belong to createmarkers.
1145 1145 torev = unfi.changelog.rev
1146 1146 sortfunc = lambda ns: torev(ns[0][0])
1147 1147 rels = []
1148 1148 for ns, s in sorted(replacements.items(), key=sortfunc):
1149 1149 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1150 1150 rels.append(rel)
1151 1151 if rels:
1152 1152 obsolete.createmarkers(
1153 1153 repo, rels, operation=operation, metadata=metadata
1154 1154 )
1155 1155 elif phases.supportinternal(repo) and mayusearchived:
1156 1156 # this assumes we do not have "unstable" nodes above the cleaned ones
1157 1157 allreplaced = set()
1158 1158 for ns in replacements.keys():
1159 1159 allreplaced.update(ns)
1160 1160 if backup:
1161 1161 from . import repair # avoid import cycle
1162 1162
1163 1163 node = min(allreplaced, key=repo.changelog.rev)
1164 1164 repair.backupbundle(
1165 1165 repo, allreplaced, allreplaced, node, operation
1166 1166 )
1167 1167 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1168 1168 else:
1169 1169 from . import repair # avoid import cycle
1170 1170
1171 1171 tostrip = list(n for ns in replacements for n in ns)
1172 1172 if tostrip:
1173 1173 repair.delayedstrip(
1174 1174 repo.ui, repo, tostrip, operation, backup=backup
1175 1175 )
1176 1176
1177 1177
1178 1178 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1179 1179 if opts is None:
1180 1180 opts = {}
1181 1181 m = matcher
1182 1182 dry_run = opts.get(b'dry_run')
1183 1183 try:
1184 1184 similarity = float(opts.get(b'similarity') or 0)
1185 1185 except ValueError:
1186 1186 raise error.Abort(_(b'similarity must be a number'))
1187 1187 if similarity < 0 or similarity > 100:
1188 1188 raise error.Abort(_(b'similarity must be between 0 and 100'))
1189 1189 similarity /= 100.0
1190 1190
1191 1191 ret = 0
1192 1192
1193 1193 wctx = repo[None]
1194 1194 for subpath in sorted(wctx.substate):
1195 1195 submatch = matchmod.subdirmatcher(subpath, m)
1196 1196 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1197 1197 sub = wctx.sub(subpath)
1198 1198 subprefix = repo.wvfs.reljoin(prefix, subpath)
1199 1199 subuipathfn = subdiruipathfn(subpath, uipathfn)
1200 1200 try:
1201 1201 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1202 1202 ret = 1
1203 1203 except error.LookupError:
1204 1204 repo.ui.status(
1205 1205 _(b"skipping missing subrepository: %s\n")
1206 1206 % uipathfn(subpath)
1207 1207 )
1208 1208
1209 1209 rejected = []
1210 1210
1211 1211 def badfn(f, msg):
1212 1212 if f in m.files():
1213 1213 m.bad(f, msg)
1214 1214 rejected.append(f)
1215 1215
1216 1216 badmatch = matchmod.badmatch(m, badfn)
1217 1217 added, unknown, deleted, removed, forgotten = _interestingfiles(
1218 1218 repo, badmatch
1219 1219 )
1220 1220
1221 1221 unknownset = set(unknown + forgotten)
1222 1222 toprint = unknownset.copy()
1223 1223 toprint.update(deleted)
1224 1224 for abs in sorted(toprint):
1225 1225 if repo.ui.verbose or not m.exact(abs):
1226 1226 if abs in unknownset:
1227 1227 status = _(b'adding %s\n') % uipathfn(abs)
1228 1228 label = b'ui.addremove.added'
1229 1229 else:
1230 1230 status = _(b'removing %s\n') % uipathfn(abs)
1231 1231 label = b'ui.addremove.removed'
1232 1232 repo.ui.status(status, label=label)
1233 1233
1234 1234 renames = _findrenames(
1235 1235 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1236 1236 )
1237 1237
1238 1238 if not dry_run:
1239 1239 _markchanges(repo, unknown + forgotten, deleted, renames)
1240 1240
1241 1241 for f in rejected:
1242 1242 if f in m.files():
1243 1243 return 1
1244 1244 return ret
1245 1245
1246 1246
1247 1247 def marktouched(repo, files, similarity=0.0):
1248 1248 '''Assert that files have somehow been operated upon. files are relative to
1249 1249 the repo root.'''
1250 1250 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1251 1251 rejected = []
1252 1252
1253 1253 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1254 1254
1255 1255 if repo.ui.verbose:
1256 1256 unknownset = set(unknown + forgotten)
1257 1257 toprint = unknownset.copy()
1258 1258 toprint.update(deleted)
1259 1259 for abs in sorted(toprint):
1260 1260 if abs in unknownset:
1261 1261 status = _(b'adding %s\n') % abs
1262 1262 else:
1263 1263 status = _(b'removing %s\n') % abs
1264 1264 repo.ui.status(status)
1265 1265
1266 1266 # TODO: We should probably have the caller pass in uipathfn and apply it to
1267 1267 # the messages above too. legacyrelativevalue=True is consistent with how
1268 1268 # it used to work.
1269 1269 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1270 1270 renames = _findrenames(
1271 1271 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1272 1272 )
1273 1273
1274 1274 _markchanges(repo, unknown + forgotten, deleted, renames)
1275 1275
1276 1276 for f in rejected:
1277 1277 if f in m.files():
1278 1278 return 1
1279 1279 return 0
1280 1280
1281 1281
1282 1282 def _interestingfiles(repo, matcher):
1283 1283 '''Walk dirstate with matcher, looking for files that addremove would care
1284 1284 about.
1285 1285
1286 1286 This is different from dirstate.status because it doesn't care about
1287 1287 whether files are modified or clean.'''
1288 1288 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1289 1289 audit_path = pathutil.pathauditor(repo.root, cached=True)
1290 1290
1291 1291 ctx = repo[None]
1292 1292 dirstate = repo.dirstate
1293 1293 matcher = repo.narrowmatch(matcher, includeexact=True)
1294 1294 walkresults = dirstate.walk(
1295 1295 matcher,
1296 1296 subrepos=sorted(ctx.substate),
1297 1297 unknown=True,
1298 1298 ignored=False,
1299 1299 full=False,
1300 1300 )
1301 1301 for abs, st in pycompat.iteritems(walkresults):
1302 1302 dstate = dirstate[abs]
1303 1303 if dstate == b'?' and audit_path.check(abs):
1304 1304 unknown.append(abs)
1305 1305 elif dstate != b'r' and not st:
1306 1306 deleted.append(abs)
1307 1307 elif dstate == b'r' and st:
1308 1308 forgotten.append(abs)
1309 1309 # for finding renames
1310 1310 elif dstate == b'r' and not st:
1311 1311 removed.append(abs)
1312 1312 elif dstate == b'a':
1313 1313 added.append(abs)
1314 1314
1315 1315 return added, unknown, deleted, removed, forgotten
1316 1316
1317 1317
1318 1318 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1319 1319 '''Find renames from removed files to added ones.'''
1320 1320 renames = {}
1321 1321 if similarity > 0:
1322 1322 for old, new, score in similar.findrenames(
1323 1323 repo, added, removed, similarity
1324 1324 ):
1325 1325 if (
1326 1326 repo.ui.verbose
1327 1327 or not matcher.exact(old)
1328 1328 or not matcher.exact(new)
1329 1329 ):
1330 1330 repo.ui.status(
1331 1331 _(
1332 1332 b'recording removal of %s as rename to %s '
1333 1333 b'(%d%% similar)\n'
1334 1334 )
1335 1335 % (uipathfn(old), uipathfn(new), score * 100)
1336 1336 )
1337 1337 renames[new] = old
1338 1338 return renames
1339 1339
1340 1340
1341 1341 def _markchanges(repo, unknown, deleted, renames):
1342 1342 '''Marks the files in unknown as added, the files in deleted as removed,
1343 1343 and the files in renames as copied.'''
1344 1344 wctx = repo[None]
1345 1345 with repo.wlock():
1346 1346 wctx.forget(deleted)
1347 1347 wctx.add(unknown)
1348 1348 for new, old in pycompat.iteritems(renames):
1349 1349 wctx.copy(old, new)
1350 1350
1351 1351
1352 1352 def getrenamedfn(repo, endrev=None):
1353 1353 if copiesmod.usechangesetcentricalgo(repo):
1354 1354
1355 1355 def getrenamed(fn, rev):
1356 1356 ctx = repo[rev]
1357 1357 p1copies = ctx.p1copies()
1358 1358 if fn in p1copies:
1359 1359 return p1copies[fn]
1360 1360 p2copies = ctx.p2copies()
1361 1361 if fn in p2copies:
1362 1362 return p2copies[fn]
1363 1363 return None
1364 1364
1365 1365 return getrenamed
1366 1366
1367 1367 rcache = {}
1368 1368 if endrev is None:
1369 1369 endrev = len(repo)
1370 1370
1371 1371 def getrenamed(fn, rev):
1372 1372 '''looks up all renames for a file (up to endrev) the first
1373 1373 time the file is given. It indexes on the changerev and only
1374 1374 parses the manifest if linkrev != changerev.
1375 1375 Returns rename info for fn at changerev rev.'''
1376 1376 if fn not in rcache:
1377 1377 rcache[fn] = {}
1378 1378 fl = repo.file(fn)
1379 1379 for i in fl:
1380 1380 lr = fl.linkrev(i)
1381 1381 renamed = fl.renamed(fl.node(i))
1382 1382 rcache[fn][lr] = renamed and renamed[0]
1383 1383 if lr >= endrev:
1384 1384 break
1385 1385 if rev in rcache[fn]:
1386 1386 return rcache[fn][rev]
1387 1387
1388 1388 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1389 1389 # filectx logic.
1390 1390 try:
1391 1391 return repo[rev][fn].copysource()
1392 1392 except error.LookupError:
1393 1393 return None
1394 1394
1395 1395 return getrenamed
1396 1396
1397 1397
1398 1398 def getcopiesfn(repo, endrev=None):
1399 1399 if copiesmod.usechangesetcentricalgo(repo):
1400 1400
1401 1401 def copiesfn(ctx):
1402 1402 if ctx.p2copies():
1403 1403 allcopies = ctx.p1copies().copy()
1404 1404 # There should be no overlap
1405 1405 allcopies.update(ctx.p2copies())
1406 1406 return sorted(allcopies.items())
1407 1407 else:
1408 1408 return sorted(ctx.p1copies().items())
1409 1409
1410 1410 else:
1411 1411 getrenamed = getrenamedfn(repo, endrev)
1412 1412
1413 1413 def copiesfn(ctx):
1414 1414 copies = []
1415 1415 for fn in ctx.files():
1416 1416 rename = getrenamed(fn, ctx.rev())
1417 1417 if rename:
1418 1418 copies.append((fn, rename))
1419 1419 return copies
1420 1420
1421 1421 return copiesfn
1422 1422
1423 1423
1424 1424 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1425 1425 """Update the dirstate to reflect the intent of copying src to dst. For
1426 1426 different reasons it might not end with dst being marked as copied from src.
1427 1427 """
1428 1428 origsrc = repo.dirstate.copied(src) or src
1429 1429 if dst == origsrc: # copying back a copy?
1430 1430 if repo.dirstate[dst] not in b'mn' and not dryrun:
1431 1431 repo.dirstate.normallookup(dst)
1432 1432 else:
1433 1433 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1434 1434 if not ui.quiet:
1435 1435 ui.warn(
1436 1436 _(
1437 1437 b"%s has not been committed yet, so no copy "
1438 1438 b"data will be stored for %s.\n"
1439 1439 )
1440 1440 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1441 1441 )
1442 1442 if repo.dirstate[dst] in b'?r' and not dryrun:
1443 1443 wctx.add([dst])
1444 1444 elif not dryrun:
1445 1445 wctx.copy(origsrc, dst)
1446 1446
1447 1447
1448 1448 def movedirstate(repo, newctx, match=None):
1449 1449 """Move the dirstate to newctx and adjust it as necessary.
1450 1450
1451 1451 A matcher can be provided as an optimization. It is probably a bug to pass
1452 1452 a matcher that doesn't match all the differences between the parent of the
1453 1453 working copy and newctx.
1454 1454 """
1455 1455 oldctx = repo[b'.']
1456 1456 ds = repo.dirstate
1457 1457 ds.setparents(newctx.node(), nullid)
1458 1458 copies = dict(ds.copies())
1459 1459 s = newctx.status(oldctx, match=match)
1460 1460 for f in s.modified:
1461 1461 if ds[f] == b'r':
1462 1462 # modified + removed -> removed
1463 1463 continue
1464 1464 ds.normallookup(f)
1465 1465
1466 1466 for f in s.added:
1467 1467 if ds[f] == b'r':
1468 1468 # added + removed -> unknown
1469 1469 ds.drop(f)
1470 1470 elif ds[f] != b'a':
1471 1471 ds.add(f)
1472 1472
1473 1473 for f in s.removed:
1474 1474 if ds[f] == b'a':
1475 1475 # removed + added -> normal
1476 1476 ds.normallookup(f)
1477 1477 elif ds[f] != b'r':
1478 1478 ds.remove(f)
1479 1479
1480 1480 # Merge old parent and old working dir copies
1481 1481 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1482 1482 oldcopies.update(copies)
1483 1483 copies = dict(
1484 1484 (dst, oldcopies.get(src, src))
1485 1485 for dst, src in pycompat.iteritems(oldcopies)
1486 1486 )
1487 1487 # Adjust the dirstate copies
1488 1488 for dst, src in pycompat.iteritems(copies):
1489 1489 if src not in newctx or dst in newctx or ds[dst] != b'a':
1490 1490 src = None
1491 1491 ds.copy(src, dst)
1492 1492
1493 1493
1494 1494 def writerequires(opener, requirements):
1495 1495 with opener(b'requires', b'w', atomictemp=True) as fp:
1496 1496 for r in sorted(requirements):
1497 1497 fp.write(b"%s\n" % r)
1498 1498
1499 1499
1500 1500 class filecachesubentry(object):
1501 1501 def __init__(self, path, stat):
1502 1502 self.path = path
1503 1503 self.cachestat = None
1504 1504 self._cacheable = None
1505 1505
1506 1506 if stat:
1507 1507 self.cachestat = filecachesubentry.stat(self.path)
1508 1508
1509 1509 if self.cachestat:
1510 1510 self._cacheable = self.cachestat.cacheable()
1511 1511 else:
1512 1512 # None means we don't know yet
1513 1513 self._cacheable = None
1514 1514
1515 1515 def refresh(self):
1516 1516 if self.cacheable():
1517 1517 self.cachestat = filecachesubentry.stat(self.path)
1518 1518
1519 1519 def cacheable(self):
1520 1520 if self._cacheable is not None:
1521 1521 return self._cacheable
1522 1522
1523 1523 # we don't know yet, assume it is for now
1524 1524 return True
1525 1525
1526 1526 def changed(self):
1527 1527 # no point in going further if we can't cache it
1528 1528 if not self.cacheable():
1529 1529 return True
1530 1530
1531 1531 newstat = filecachesubentry.stat(self.path)
1532 1532
1533 1533 # we may not know if it's cacheable yet, check again now
1534 1534 if newstat and self._cacheable is None:
1535 1535 self._cacheable = newstat.cacheable()
1536 1536
1537 1537 # check again
1538 1538 if not self._cacheable:
1539 1539 return True
1540 1540
1541 1541 if self.cachestat != newstat:
1542 1542 self.cachestat = newstat
1543 1543 return True
1544 1544 else:
1545 1545 return False
1546 1546
1547 1547 @staticmethod
1548 1548 def stat(path):
1549 1549 try:
1550 1550 return util.cachestat(path)
1551 1551 except OSError as e:
1552 1552 if e.errno != errno.ENOENT:
1553 1553 raise
1554 1554
1555 1555
1556 1556 class filecacheentry(object):
1557 1557 def __init__(self, paths, stat=True):
1558 1558 self._entries = []
1559 1559 for path in paths:
1560 1560 self._entries.append(filecachesubentry(path, stat))
1561 1561
1562 1562 def changed(self):
1563 1563 '''true if any entry has changed'''
1564 1564 for entry in self._entries:
1565 1565 if entry.changed():
1566 1566 return True
1567 1567 return False
1568 1568
1569 1569 def refresh(self):
1570 1570 for entry in self._entries:
1571 1571 entry.refresh()
1572 1572
1573 1573
1574 1574 class filecache(object):
1575 1575 """A property like decorator that tracks files under .hg/ for updates.
1576 1576
1577 1577 On first access, the files defined as arguments are stat()ed and the
1578 1578 results cached. The decorated function is called. The results are stashed
1579 1579 away in a ``_filecache`` dict on the object whose method is decorated.
1580 1580
1581 1581 On subsequent access, the cached result is used as it is set to the
1582 1582 instance dictionary.
1583 1583
1584 1584 On external property set/delete operations, the caller must update the
1585 1585 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1586 1586 instead of directly setting <attr>.
1587 1587
1588 1588 When using the property API, the cached data is always used if available.
1589 1589 No stat() is performed to check if the file has changed.
1590 1590
1591 1591 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1592 1592 can populate an entry before the property's getter is called. In this case,
1593 1593 entries in ``_filecache`` will be used during property operations,
1594 1594 if available. If the underlying file changes, it is up to external callers
1595 1595 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1596 1596 method result as well as possibly calling ``del obj._filecache[attr]`` to
1597 1597 remove the ``filecacheentry``.
1598 1598 """
1599 1599
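# A usage sketch (illustrative; a concrete subclass must provide join()):
#
#   class repofilecache(filecache):
#       def join(self, obj, fname):
#           return obj.vfs.join(fname)
#
#   class somerepo(object):
#       def __init__(self):
#           self._filecache = {}
#       @repofilecache(b'bookmarks')
#       def _bookmarks(self):
#           return readbookmarks(self)   # hypothetical reader
#
# The first access stats the named file, calls the decorated function and
# caches the result; later accesses are served from the instance dictionary.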
1600 1600 def __init__(self, *paths):
1601 1601 self.paths = paths
1602 1602
1603 1603 def join(self, obj, fname):
1604 1604 """Used to compute the runtime path of a cached file.
1605 1605
1606 1606 Users should subclass filecache and provide their own version of this
1607 1607 function to call the appropriate join function on 'obj' (an instance
1608 1608 of the class whose member function was decorated).
1609 1609 """
1610 1610 raise NotImplementedError
1611 1611
1612 1612 def __call__(self, func):
1613 1613 self.func = func
1614 1614 self.sname = func.__name__
1615 1615 self.name = pycompat.sysbytes(self.sname)
1616 1616 return self
1617 1617
1618 1618 def __get__(self, obj, type=None):
1619 1619 # if accessed on the class, return the descriptor itself.
1620 1620 if obj is None:
1621 1621 return self
1622 1622
1623 1623 assert self.sname not in obj.__dict__
1624 1624
1625 1625 entry = obj._filecache.get(self.name)
1626 1626
1627 1627 if entry:
1628 1628 if entry.changed():
1629 1629 entry.obj = self.func(obj)
1630 1630 else:
1631 1631 paths = [self.join(obj, path) for path in self.paths]
1632 1632
1633 1633 # We stat -before- creating the object so our cache doesn't lie if
1634 1634 # a writer modified between the time we read and stat
1635 1635 entry = filecacheentry(paths, True)
1636 1636 entry.obj = self.func(obj)
1637 1637
1638 1638 obj._filecache[self.name] = entry
1639 1639
1640 1640 obj.__dict__[self.sname] = entry.obj
1641 1641 return entry.obj
1642 1642
1643 1643 # don't implement __set__(), which would make __dict__ lookup as slow as
1644 1644 # a function call.
1645 1645
1646 1646 def set(self, obj, value):
1647 1647 if self.name not in obj._filecache:
1648 1648 # we add an entry for the missing value because X in __dict__
1649 1649 # implies X in _filecache
1650 1650 paths = [self.join(obj, path) for path in self.paths]
1651 1651 ce = filecacheentry(paths, False)
1652 1652 obj._filecache[self.name] = ce
1653 1653 else:
1654 1654 ce = obj._filecache[self.name]
1655 1655
1656 1656 ce.obj = value # update cached copy
1657 1657 obj.__dict__[self.sname] = value # update copy returned by obj.x
1658 1658
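# --- editor's illustrative sketch (not part of scmutil.py) ---
# Minimal example of how a filecache subclass is typically wired up; the
# class and attribute names below are assumptions made for illustration,
# not code from this module.
class _examplefilecache(filecache):
    def join(self, obj, fname):
        # resolve the tracked path relative to the object's vfs
        return obj.vfs.join(fname)


class _examplerepo(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}  # required by the filecache protocol

    @_examplefilecache(b'bookmarks')
    def bookmarks(self):
        # runs on first access, and again after invalidation if the
        # tracked file's stat() result changed
        return self.vfs.tryread(b'bookmarks')
# --- end of sketch ---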
1659 1659
1660 1660 def extdatasource(repo, source):
1661 1661 """Gather a map of rev -> value dict from the specified source
1662 1662
1663 1663 A source spec is treated as a URL, with a special case shell: type
1664 1664 for parsing the output from a shell command.
1665 1665
1666 1666 The data is parsed as a series of newline-separated records where
1667 1667 each record is a revision specifier optionally followed by a space
1668 1668 and a freeform string value. If the revision is known locally, it
1669 1669 is converted to a rev, otherwise the record is skipped.
1670 1670
1671 1671 Note that both key and value are treated as UTF-8 and converted to
1672 1672 the local encoding. This allows uniformity between local and
1673 1673 remote data sources.
1674 1674 """
1675 1675
1676 1676 spec = repo.ui.config(b"extdata", source)
1677 1677 if not spec:
1678 1678 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1679 1679
1680 1680 data = {}
1681 1681 src = proc = None
1682 1682 try:
1683 1683 if spec.startswith(b"shell:"):
1684 1684 # external commands should be run relative to the repo root
1685 1685 cmd = spec[6:]
1686 1686 proc = subprocess.Popen(
1687 1687 procutil.tonativestr(cmd),
1688 1688 shell=True,
1689 1689 bufsize=-1,
1690 1690 close_fds=procutil.closefds,
1691 1691 stdout=subprocess.PIPE,
1692 1692 cwd=procutil.tonativestr(repo.root),
1693 1693 )
1694 1694 src = proc.stdout
1695 1695 else:
1696 1696 # treat as a URL or file
1697 1697 src = url.open(repo.ui, spec)
1698 1698 for l in src:
1699 1699 if b" " in l:
1700 1700 k, v = l.strip().split(b" ", 1)
1701 1701 else:
1702 1702 k, v = l.strip(), b""
1703 1703
1704 1704 k = encoding.tolocal(k)
1705 1705 try:
1706 1706 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1707 1707 except (error.LookupError, error.RepoLookupError):
1708 1708 pass # we ignore data for nodes that don't exist locally
1709 1709 finally:
1710 1710 if proc:
1711 1711 try:
1712 1712 proc.communicate()
1713 1713 except ValueError:
1714 1714 # This happens if we started iterating src and then
1715 1715 # get a parse error on a line. It should be safe to ignore.
1716 1716 pass
1717 1717 if src:
1718 1718 src.close()
1719 1719 if proc and proc.returncode != 0:
1720 1720 raise error.Abort(
1721 1721 _(b"extdata command '%s' failed: %s")
1722 1722 % (cmd, procutil.explainexit(proc.returncode))
1723 1723 )
1724 1724
1725 1725 return data
1726 1726
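# --- editor's illustrative sketch (not part of scmutil.py) ---
# Example of consuming an [extdata] source.  The config section and command
# below are assumptions made for illustration:
#
#     [extdata]
#     bugrefs = shell:cat .hg/bugrefs
#
# where each line produced by the command looks like "<revspec> <value>",
# e.g. "3de5eca88c00 issue1234".
def _examplebugrefs(repo):
    # returns a {rev: b'issue1234', ...} style mapping
    return extdatasource(repo, b'bugrefs')
# --- end of sketch ---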
1727 1727
1728 1728 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1729 1729 if lock is None:
1730 1730 raise error.LockInheritanceContractViolation(
1731 1731 b'lock can only be inherited while held'
1732 1732 )
1733 1733 if environ is None:
1734 1734 environ = {}
1735 1735 with lock.inherit() as locker:
1736 1736 environ[envvar] = locker
1737 1737 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1738 1738
1739 1739
1740 1740 def wlocksub(repo, cmd, *args, **kwargs):
1741 1741 """run cmd as a subprocess that allows inheriting repo's wlock
1742 1742
1743 1743 This can only be called while the wlock is held. This takes all the
1744 1744 arguments that ui.system does, and returns the exit code of the
1745 1745 subprocess."""
1746 1746 return _locksub(
1747 1747 repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
1748 1748 )
1749 1749
1750 1750
1751 1751 class progress(object):
1752 1752 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1753 1753 self.ui = ui
1754 1754 self.pos = 0
1755 1755 self.topic = topic
1756 1756 self.unit = unit
1757 1757 self.total = total
1758 1758 self.debug = ui.configbool(b'progress', b'debug')
1759 1759 self._updatebar = updatebar
1760 1760
1761 1761 def __enter__(self):
1762 1762 return self
1763 1763
1764 1764 def __exit__(self, exc_type, exc_value, exc_tb):
1765 1765 self.complete()
1766 1766
1767 1767 def update(self, pos, item=b"", total=None):
1768 1768 assert pos is not None
1769 1769 if total:
1770 1770 self.total = total
1771 1771 self.pos = pos
1772 1772 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1773 1773 if self.debug:
1774 1774 self._printdebug(item)
1775 1775
1776 1776 def increment(self, step=1, item=b"", total=None):
1777 1777 self.update(self.pos + step, item, total)
1778 1778
1779 1779 def complete(self):
1780 1780 self.pos = None
1781 1781 self.unit = b""
1782 1782 self.total = None
1783 1783 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1784 1784
1785 1785 def _printdebug(self, item):
1786 1786 if self.unit:
1787 1787 unit = b' ' + self.unit
1788 1788 if item:
1789 1789 item = b' ' + item
1790 1790
1791 1791 if self.total:
1792 1792 pct = 100.0 * self.pos / self.total
1793 1793 self.ui.debug(
1794 1794 b'%s:%s %d/%d%s (%4.2f%%)\n'
1795 1795 % (self.topic, item, self.pos, self.total, unit, pct)
1796 1796 )
1797 1797 else:
1798 1798 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1799 1799
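# --- editor's illustrative sketch (not part of scmutil.py) ---
# Typical use of the progress helper as a context manager; ui.makeprogress()
# is assumed here to be the usual way callers obtain an instance rather than
# instantiating the class directly.
def _exampleprogress(ui, files):
    with ui.makeprogress(b'scanning', unit=b'files', total=len(files)) as p:
        for i, f in enumerate(files, 1):
            p.update(i, item=f)
    # leaving the "with" block calls complete() and clears the bar
# --- end of sketch ---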
1800 1800
1801 1801 def gdinitconfig(ui):
1802 1802 """helper function to know if a repo should be created as general delta
1803 1803 """
1804 1804 # experimental config: format.generaldelta
1805 1805 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1806 1806 b'format', b'usegeneraldelta'
1807 1807 )
1808 1808
1809 1809
1810 1810 def gddeltaconfig(ui):
1811 1811 """helper function to know if incoming delta should be optimised
1812 1812 """
1813 1813 # experimental config: format.generaldelta
1814 1814 return ui.configbool(b'format', b'generaldelta')
1815 1815
1816 1816
1817 1817 class simplekeyvaluefile(object):
1818 1818 """A simple file with key=value lines
1819 1819
1820 1820 Keys must be alphanumeric and start with a letter; values must not
1821 1821 contain '\n' characters"""
1822 1822
1823 1823 firstlinekey = b'__firstline'
1824 1824
1825 1825 def __init__(self, vfs, path, keys=None):
1826 1826 self.vfs = vfs
1827 1827 self.path = path
1828 1828
1829 1829 def read(self, firstlinenonkeyval=False):
1830 1830 """Read the contents of a simple key-value file
1831 1831
1832 1832 'firstlinenonkeyval' indicates whether the first line of the file should
1833 1833 be treated as a key-value pair or returned fully under the
1834 1834 __firstline key."""
1835 1835 lines = self.vfs.readlines(self.path)
1836 1836 d = {}
1837 1837 if firstlinenonkeyval:
1838 1838 if not lines:
1839 1839 e = _(b"empty simplekeyvalue file")
1840 1840 raise error.CorruptedState(e)
1841 1841 # we don't want to include '\n' in the __firstline
1842 1842 d[self.firstlinekey] = lines[0][:-1]
1843 1843 del lines[0]
1844 1844
1845 1845 try:
1846 1846 # the 'if line.strip()' part prevents us from failing on empty
1847 1847 # lines, which only contain '\n' and are therefore not skipped
1848 1848 # by 'if line'
1849 1849 updatedict = dict(
1850 1850 line[:-1].split(b'=', 1) for line in lines if line.strip()
1851 1851 )
1852 1852 if self.firstlinekey in updatedict:
1853 1853 e = _(b"%r can't be used as a key")
1854 1854 raise error.CorruptedState(e % self.firstlinekey)
1855 1855 d.update(updatedict)
1856 1856 except ValueError as e:
1857 1857 raise error.CorruptedState(stringutil.forcebytestr(e))
1858 1858 return d
1859 1859
1860 1860 def write(self, data, firstline=None):
1861 1861 """Write key=>value mapping to a file
1862 1862 data is a dict. Keys must be alphanumeric and start with a letter.
1863 1863 Values must not contain newline characters.
1864 1864
1865 1865 If 'firstline' is not None, it is written to file before
1866 1866 everything else, as it is, not in a key=value form"""
1867 1867 lines = []
1868 1868 if firstline is not None:
1869 1869 lines.append(b'%s\n' % firstline)
1870 1870
1871 1871 for k, v in data.items():
1872 1872 if k == self.firstlinekey:
1873 1873 e = b"key name '%s' is reserved" % self.firstlinekey
1874 1874 raise error.ProgrammingError(e)
1875 1875 if not k[0:1].isalpha():
1876 1876 e = b"keys must start with a letter in a key-value file"
1877 1877 raise error.ProgrammingError(e)
1878 1878 if not k.isalnum():
1879 1879 e = b"invalid key name in a simple key-value file"
1880 1880 raise error.ProgrammingError(e)
1881 1881 if b'\n' in v:
1882 1882 e = b"invalid value in a simple key-value file"
1883 1883 raise error.ProgrammingError(e)
1884 1884 lines.append(b"%s=%s\n" % (k, v))
1885 1885 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1886 1886 fp.write(b''.join(lines))
1887 1887
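# --- editor's illustrative sketch (not part of scmutil.py) ---
# Round trip through simplekeyvaluefile; the file name b'examplestate' is a
# hypothetical choice made for illustration.
def _exampleroundtrip(repo):
    skv = simplekeyvaluefile(repo.vfs, b'examplestate')
    skv.write({b'version': b'1', b'step': b'apply'}, firstline=b'v1')
    data = skv.read(firstlinenonkeyval=True)
    # data == {b'__firstline': b'v1', b'version': b'1', b'step': b'apply'}
    return data
# --- end of sketch ---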
1888 1888
1889 1889 _reportobsoletedsource = [
1890 1890 b'debugobsolete',
1891 1891 b'pull',
1892 1892 b'push',
1893 1893 b'serve',
1894 1894 b'unbundle',
1895 1895 ]
1896 1896
1897 1897 _reportnewcssource = [
1898 1898 b'pull',
1899 1899 b'unbundle',
1900 1900 ]
1901 1901
1902 1902
1903 1903 def prefetchfiles(repo, revs, match):
1904 1904 """Invokes the registered file prefetch functions, allowing extensions to
1905 1905 ensure the corresponding files are available locally, before the command
1906 1906 uses them."""
1907 1907 if match:
1908 1908 # The command itself will complain about files that don't exist, so
1909 1909 # don't duplicate the message.
1910 1910 match = matchmod.badmatch(match, lambda fn, msg: None)
1911 1911 else:
1912 1912 match = matchall(repo)
1913 1913
1914 1914 fileprefetchhooks(repo, revs, match)
1915 1915
1916 1916
1917 1917 # a list of (repo, revs, match) prefetch functions
1918 1918 fileprefetchhooks = util.hooks()
1919 1919
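# --- editor's illustrative sketch (not part of scmutil.py) ---
# An extension that wants files available locally before a command runs can
# register a prefetch hook; the extension name below is an assumption, and
# registration is shown commented out since it assumes the usual
# util.hooks.add(source, hook) entry point.
def _exampleprefetch(repo, revs, match):
    # fetch the files selected by 'match' for the given revisions here
    pass


# fileprefetchhooks.add(b'myextension', _exampleprefetch)
# --- end of sketch ---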
1920 1920 # A marker that tells the evolve extension to suppress its own reporting
1921 1921 _reportstroubledchangesets = True
1922 1922
1923 1923
1924 1924 def registersummarycallback(repo, otr, txnname=b''):
1925 1925 """register a callback to issue a summary after the transaction is closed
1926 1926 """
1927 1927
1928 1928 def txmatch(sources):
1929 1929 return any(txnname.startswith(source) for source in sources)
1930 1930
1931 1931 categories = []
1932 1932
1933 1933 def reportsummary(func):
1934 1934 """decorator for report callbacks."""
1935 1935 # The repoview life cycle is shorter than that of the actual
1936 1936 # underlying repository. So the filtered object can die before the
1937 1937 # weakref is used, leading to trouble. We keep a reference to the
1938 1938 # unfiltered object and restore the filtering when retrieving the
1939 1939 # repository through the weakref.
1940 1940 filtername = repo.filtername
1941 1941 reporef = weakref.ref(repo.unfiltered())
1942 1942
1943 1943 def wrapped(tr):
1944 1944 repo = reporef()
1945 1945 if filtername:
1946 1946 repo = repo.filtered(filtername)
1947 1947 func(repo, tr)
1948 1948
1949 1949 newcat = b'%02i-txnreport' % len(categories)
1950 1950 otr.addpostclose(newcat, wrapped)
1951 1951 categories.append(newcat)
1952 1952 return wrapped
1953 1953
1954 1954 @reportsummary
1955 1955 def reportchangegroup(repo, tr):
1956 1956 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
1957 1957 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
1958 1958 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
1959 1959 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
1960 1960 if cgchangesets or cgrevisions or cgfiles:
1961 1961 htext = b""
1962 1962 if cgheads:
1963 1963 htext = _(b" (%+d heads)") % cgheads
1964 1964 msg = _(b"added %d changesets with %d changes to %d files%s\n")
1965 1965 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
1966 1966
1967 1967 if txmatch(_reportobsoletedsource):
1968 1968
1969 1969 @reportsummary
1970 1970 def reportobsoleted(repo, tr):
1971 1971 obsoleted = obsutil.getobsoleted(repo, tr)
1972 1972 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
1973 1973 if newmarkers:
1974 1974 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
1975 1975 if obsoleted:
1976 1976 repo.ui.status(_(b'obsoleted %i changesets\n') % len(obsoleted))
1977 1977
1978 1978 if obsolete.isenabled(
1979 1979 repo, obsolete.createmarkersopt
1980 1980 ) and repo.ui.configbool(
1981 1981 b'experimental', b'evolution.report-instabilities'
1982 1982 ):
1983 1983 instabilitytypes = [
1984 1984 (b'orphan', b'orphan'),
1985 1985 (b'phase-divergent', b'phasedivergent'),
1986 1986 (b'content-divergent', b'contentdivergent'),
1987 1987 ]
1988 1988
1989 1989 def getinstabilitycounts(repo):
1990 1990 filtered = repo.changelog.filteredrevs
1991 1991 counts = {}
1992 1992 for instability, revset in instabilitytypes:
1993 1993 counts[instability] = len(
1994 1994 set(obsolete.getrevs(repo, revset)) - filtered
1995 1995 )
1996 1996 return counts
1997 1997
1998 1998 oldinstabilitycounts = getinstabilitycounts(repo)
1999 1999
2000 2000 @reportsummary
2001 2001 def reportnewinstabilities(repo, tr):
2002 2002 newinstabilitycounts = getinstabilitycounts(repo)
2003 2003 for instability, revset in instabilitytypes:
2004 2004 delta = (
2005 2005 newinstabilitycounts[instability]
2006 2006 - oldinstabilitycounts[instability]
2007 2007 )
2008 2008 msg = getinstabilitymessage(delta, instability)
2009 2009 if msg:
2010 2010 repo.ui.warn(msg)
2011 2011
2012 2012 if txmatch(_reportnewcssource):
2013 2013
2014 2014 @reportsummary
2015 2015 def reportnewcs(repo, tr):
2016 2016 """Report the range of new revisions pulled/unbundled."""
2017 2017 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2018 2018 unfi = repo.unfiltered()
2019 2019 if origrepolen >= len(unfi):
2020 2020 return
2021 2021
2022 2022 # Compute the bounds of new visible revisions' range.
2023 2023 revs = smartset.spanset(repo, start=origrepolen)
2024 2024 if revs:
2025 2025 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2026 2026
2027 2027 if minrev == maxrev:
2028 2028 revrange = minrev
2029 2029 else:
2030 2030 revrange = b'%s:%s' % (minrev, maxrev)
2031 2031 draft = len(repo.revs(b'%ld and draft()', revs))
2032 2032 secret = len(repo.revs(b'%ld and secret()', revs))
2033 2033 if not (draft or secret):
2034 2034 msg = _(b'new changesets %s\n') % revrange
2035 2035 elif draft and secret:
2036 2036 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2037 2037 msg %= (revrange, draft, secret)
2038 2038 elif draft:
2039 2039 msg = _(b'new changesets %s (%d drafts)\n')
2040 2040 msg %= (revrange, draft)
2041 2041 elif secret:
2042 2042 msg = _(b'new changesets %s (%d secrets)\n')
2043 2043 msg %= (revrange, secret)
2044 2044 else:
2045 2045 errormsg = b'entered unreachable condition'
2046 2046 raise error.ProgrammingError(errormsg)
2047 2047 repo.ui.status(msg)
2048 2048
2049 2049 # search new changesets directly pulled as obsolete
2050 2050 duplicates = tr.changes.get(b'revduplicates', ())
2051 2051 obsadded = unfi.revs(
2052 2052 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2053 2053 )
2054 2054 cl = repo.changelog
2055 2055 extinctadded = [r for r in obsadded if r not in cl]
2056 2056 if extinctadded:
2057 2057 # They are not just obsolete, but obsolete and invisible
2058 2058 # we call them "extinct" internally but the terms have not been
2059 2059 # exposed to users.
2060 2060 msg = b'(%d other changesets obsolete on arrival)\n'
2061 2061 repo.ui.status(msg % len(extinctadded))
2062 2062
2063 2063 @reportsummary
2064 2064 def reportphasechanges(repo, tr):
2065 2065 """Report statistics of phase changes for changesets pre-existing
2066 2066 pull/unbundle.
2067 2067 """
2068 2068 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2069 2069 phasetracking = tr.changes.get(b'phases', {})
2070 2070 if not phasetracking:
2071 2071 return
2072 2072 published = [
2073 2073 rev
2074 2074 for rev, (old, new) in pycompat.iteritems(phasetracking)
2075 2075 if new == phases.public and rev < origrepolen
2076 2076 ]
2077 2077 if not published:
2078 2078 return
2079 2079 repo.ui.status(
2080 2080 _(b'%d local changesets published\n') % len(published)
2081 2081 )
2082 2082
2083 2083
2084 2084 def getinstabilitymessage(delta, instability):
2085 2085 """function to return the message to show warning about new instabilities
2086 2086
2087 2087 exists as a separate function so that extension can wrap to show more
2088 2088 information like how to fix instabilities"""
2089 2089 if delta > 0:
2090 2090 return _(b'%i new %s changesets\n') % (delta, instability)
2091 2091
2092 2092
2093 2093 def nodesummaries(repo, nodes, maxnumnodes=4):
2094 2094 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2095 2095 return b' '.join(short(h) for h in nodes)
2096 2096 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2097 2097 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2098 2098
2099 2099
2100 2100 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2101 2101 """check that no named branch has multiple heads"""
2102 2102 if desc in (b'strip', b'repair'):
2103 2103 # skip the logic during strip
2104 2104 return
2105 2105 visible = repo.filtered(b'visible')
2106 2106 # possible improvement: we could restrict the check to affected branch
2107 2107 bm = visible.branchmap()
2108 2108 for name in bm:
2109 2109 heads = bm.branchheads(name, closed=accountclosed)
2110 2110 if len(heads) > 1:
2111 2111 msg = _(b'rejecting multiple heads on branch "%s"')
2112 2112 msg %= name
2113 2113 hint = _(b'%d heads: %s')
2114 2114 hint %= (len(heads), nodesummaries(repo, heads))
2115 2115 raise error.Abort(msg, hint=hint)
2116 2116
2117 2117
2118 2118 def wrapconvertsink(sink):
2119 2119 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2120 2120 before it is used, whether or not the convert extension was formally loaded.
2121 2121 """
2122 2122 return sink
2123 2123
2124 2124
2125 2125 def unhidehashlikerevs(repo, specs, hiddentype):
2126 2126 """parse the user specs and unhide changesets whose hash or revision number
2127 2127 is passed.
2128 2128
2129 2129 hiddentype can be: 1) 'warn': warn while unhiding changesets
2130 2130 2) 'nowarn': don't warn while unhiding changesets
2131 2131
2132 2132 returns a repo object with the required changesets unhidden
2133 2133 """
2134 2134 if not repo.filtername or not repo.ui.configbool(
2135 2135 b'experimental', b'directaccess'
2136 2136 ):
2137 2137 return repo
2138 2138
2139 2139 if repo.filtername not in (b'visible', b'visible-hidden'):
2140 2140 return repo
2141 2141
2142 2142 symbols = set()
2143 2143 for spec in specs:
2144 2144 try:
2145 2145 tree = revsetlang.parse(spec)
2146 2146 except error.ParseError: # will be reported by scmutil.revrange()
2147 2147 continue
2148 2148
2149 2149 symbols.update(revsetlang.gethashlikesymbols(tree))
2150 2150
2151 2151 if not symbols:
2152 2152 return repo
2153 2153
2154 2154 revs = _getrevsfromsymbols(repo, symbols)
2155 2155
2156 2156 if not revs:
2157 2157 return repo
2158 2158
2159 2159 if hiddentype == b'warn':
2160 2160 unfi = repo.unfiltered()
2161 2161 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2162 2162 repo.ui.warn(
2163 2163 _(
2164 2164 b"warning: accessing hidden changesets for write "
2165 2165 b"operation: %s\n"
2166 2166 )
2167 2167 % revstr
2168 2168 )
2169 2169
2170 2170 # we have to use a new filtername to separate the branch/tags caches until we
2171 2171 # can disable these caches when revisions are dynamically pinned.
2172 2172 return repo.filtered(b'visible-hidden', revs)
2173 2173
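# --- editor's illustrative sketch (not part of scmutil.py) ---
# With the experimental directaccess feature enabled, e.g.
#
#     [experimental]
#     directaccess = True
#
# a command can let users address hidden changesets by hash; the hash below
# is a placeholder used only for illustration.
def _exampledirectaccess(repo):
    return unhidehashlikerevs(repo, [b'3de5eca88c00'], b'warn')
# --- end of sketch ---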
2174 2174
2175 2175 def _getrevsfromsymbols(repo, symbols):
2176 2176 """parse the list of symbols and returns a set of revision numbers of hidden
2177 2177 changesets present in symbols"""
2178 2178 revs = set()
2179 2179 unfi = repo.unfiltered()
2180 2180 unficl = unfi.changelog
2181 2181 cl = repo.changelog
2182 2182 tiprev = len(unficl)
2183 2183 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2184 2184 for s in symbols:
2185 2185 try:
2186 2186 n = int(s)
2187 2187 if n <= tiprev:
2188 2188 if not allowrevnums:
2189 2189 continue
2190 2190 else:
2191 2191 if n not in cl:
2192 2192 revs.add(n)
2193 2193 continue
2194 2194 except ValueError:
2195 2195 pass
2196 2196
2197 2197 try:
2198 2198 s = resolvehexnodeidprefix(unfi, s)
2199 2199 except (error.LookupError, error.WdirUnsupported):
2200 2200 s = None
2201 2201
2202 2202 if s is not None:
2203 2203 rev = unficl.rev(s)
2204 2204 if rev not in cl:
2205 2205 revs.add(rev)
2206 2206
2207 2207 return revs
2208 2208
2209 2209
2210 2210 def bookmarkrevs(repo, mark):
2211 2211 """
2212 2212 Select revisions reachable by a given bookmark
2213 2213 """
2214 2214 return repo.revs(
2215 2215 b"ancestors(bookmark(%s)) - "
2216 2216 b"ancestors(head() and not bookmark(%s)) - "
2217 2217 b"ancestors(bookmark() and not bookmark(%s))",
2218 2218 mark,
2219 2219 mark,
2220 2220 mark,
2221 2221 )
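# --- editor's illustrative sketch (not part of scmutil.py) ---
# The revset above selects the changesets "belonging" to a bookmark, i.e.
# ancestors of the bookmarked head that are not reachable from other heads
# or bookmarks.  The bookmark name below is an assumption for illustration.
def _examplebookmarkrevs(repo):
    return bookmarkrevs(repo, b'feature')
# --- end of sketch ---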
@@ -1,1301 +1,1307 b''
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 Prepare repo a:
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ echo a > a
16 16 $ hg add a
17 17 $ hg commit -m test
18 18 $ echo first line > b
19 19 $ hg add b
20 20
21 21 Create a non-inlined filelog:
22 22
23 23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 25 > cat data1 >> b
26 26 > hg commit -m test
27 27 > done
28 28
29 29 List files in store/data (should show a 'b.d'):
30 30
31 31 #if reporevlogstore
32 32 $ for i in .hg/store/data/*; do
33 33 > echo $i
34 34 > done
35 35 .hg/store/data/a.i
36 36 .hg/store/data/b.d
37 37 .hg/store/data/b.i
38 38 #endif
39 39
40 40 Trigger branchcache creation:
41 41
42 42 $ hg branches
43 43 default 10:a7949464abda
44 44 $ ls .hg/cache
45 45 branch2-served
46 46 rbc-names-v1
47 47 rbc-revs-v1
48 48
49 49 Default operation:
50 50
51 51 $ hg clone . ../b
52 52 updating to branch default
53 53 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
54 54 $ cd ../b
55 55
56 56 Ensure branchcache got copied over:
57 57
58 58 $ ls .hg/cache
59 59 branch2-served
60 60 rbc-names-v1
61 61 rbc-revs-v1
62 62
63 63 $ cat a
64 64 a
65 65 $ hg verify
66 66 checking changesets
67 67 checking manifests
68 68 crosschecking files in changesets and manifests
69 69 checking files
70 70 checked 11 changesets with 11 changes to 2 files
71 71
72 72 Invalid dest '' must abort:
73 73
74 74 $ hg clone . ''
75 75 abort: empty destination path is not valid
76 76 [255]
77 77
78 78 No update, with debug option:
79 79
80 80 #if hardlink
81 81 $ hg --debug clone -U . ../c --config progress.debug=true
82 82 linking: 1 files
83 83 linking: 2 files
84 84 linking: 3 files
85 85 linking: 4 files
86 86 linking: 5 files
87 87 linking: 6 files
88 88 linking: 7 files
89 89 linking: 8 files
90 90 linked 8 files (reporevlogstore !)
91 91 linking: 9 files (reposimplestore !)
92 92 linking: 10 files (reposimplestore !)
93 93 linking: 11 files (reposimplestore !)
94 94 linking: 12 files (reposimplestore !)
95 95 linking: 13 files (reposimplestore !)
96 96 linking: 14 files (reposimplestore !)
97 97 linking: 15 files (reposimplestore !)
98 98 linking: 16 files (reposimplestore !)
99 99 linking: 17 files (reposimplestore !)
100 100 linking: 18 files (reposimplestore !)
101 101 linked 18 files (reposimplestore !)
102 102 #else
103 103 $ hg --debug clone -U . ../c --config progress.debug=true
104 104 linking: 1 files
105 105 copying: 2 files
106 106 copying: 3 files
107 107 copying: 4 files
108 108 copying: 5 files
109 109 copying: 6 files
110 110 copying: 7 files
111 111 copying: 8 files
112 112 copied 8 files (reporevlogstore !)
113 113 copying: 9 files (reposimplestore !)
114 114 copying: 10 files (reposimplestore !)
115 115 copying: 11 files (reposimplestore !)
116 116 copying: 12 files (reposimplestore !)
117 117 copying: 13 files (reposimplestore !)
118 118 copying: 14 files (reposimplestore !)
119 119 copying: 15 files (reposimplestore !)
120 120 copying: 16 files (reposimplestore !)
121 121 copying: 17 files (reposimplestore !)
122 122 copying: 18 files (reposimplestore !)
123 123 copied 18 files (reposimplestore !)
124 124 #endif
125 125 $ cd ../c
126 126
127 127 Ensure branchcache got copied over:
128 128
129 129 $ ls .hg/cache
130 130 branch2-served
131 131 rbc-names-v1
132 132 rbc-revs-v1
133 133
134 134 $ cat a 2>/dev/null || echo "a not present"
135 135 a not present
136 136 $ hg verify
137 137 checking changesets
138 138 checking manifests
139 139 crosschecking files in changesets and manifests
140 140 checking files
141 141 checked 11 changesets with 11 changes to 2 files
142 142
143 143 Default destination:
144 144
145 145 $ mkdir ../d
146 146 $ cd ../d
147 147 $ hg clone ../a
148 148 destination directory: a
149 149 updating to branch default
150 150 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 151 $ cd a
152 152 $ hg cat a
153 153 a
154 154 $ cd ../..
155 155
156 156 Check that we drop the 'file:' from the path before writing the .hgrc:
157 157
158 158 $ hg clone file:a e
159 159 updating to branch default
160 160 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
161 161 $ grep 'file:' e/.hg/hgrc
162 162 [1]
163 163
164 164 Check that path aliases are expanded:
165 165
166 166 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
167 167 $ hg -R f showconfig paths.default
168 168 $TESTTMP/a#0
169 169
170 170 Use --pull:
171 171
172 172 $ hg clone --pull a g
173 173 requesting all changes
174 174 adding changesets
175 175 adding manifests
176 176 adding file changes
177 177 added 11 changesets with 11 changes to 2 files
178 178 new changesets acb14030fe0a:a7949464abda
179 179 updating to branch default
180 180 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
181 181 $ hg -R g verify
182 182 checking changesets
183 183 checking manifests
184 184 crosschecking files in changesets and manifests
185 185 checking files
186 186 checked 11 changesets with 11 changes to 2 files
187 187
188 188 Invalid dest '' with --pull must abort (issue2528):
189 189
190 190 $ hg clone --pull a ''
191 191 abort: empty destination path is not valid
192 192 [255]
193 193
194 194 Clone to '.':
195 195
196 196 $ mkdir h
197 197 $ cd h
198 198 $ hg clone ../a .
199 199 updating to branch default
200 200 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 201 $ cd ..
202 202
203 203
204 204 *** Tests for option -u ***
205 205
206 206 Adding some more history to repo a:
207 207
208 208 $ cd a
209 209 $ hg tag ref1
210 210 $ echo the quick brown fox >a
211 211 $ hg ci -m "hacked default"
212 212 $ hg up ref1
213 213 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
214 214 $ hg branch stable
215 215 marked working directory as branch stable
216 216 (branches are permanent and global, did you want a bookmark?)
217 217 $ echo some text >a
218 218 $ hg ci -m "starting branch stable"
219 219 $ hg tag ref2
220 220 $ echo some more text >a
221 221 $ hg ci -m "another change for branch stable"
222 222 $ hg up ref2
223 223 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
224 224 $ hg parents
225 225 changeset: 13:e8ece76546a6
226 226 branch: stable
227 227 tag: ref2
228 228 parent: 10:a7949464abda
229 229 user: test
230 230 date: Thu Jan 01 00:00:00 1970 +0000
231 231 summary: starting branch stable
232 232
233 233
234 234 Repo a has two heads:
235 235
236 236 $ hg heads
237 237 changeset: 15:0aae7cf88f0d
238 238 branch: stable
239 239 tag: tip
240 240 user: test
241 241 date: Thu Jan 01 00:00:00 1970 +0000
242 242 summary: another change for branch stable
243 243
244 244 changeset: 12:f21241060d6a
245 245 user: test
246 246 date: Thu Jan 01 00:00:00 1970 +0000
247 247 summary: hacked default
248 248
249 249
250 250 $ cd ..
251 251
252 252
253 253 Testing --noupdate with --updaterev (must abort):
254 254
255 255 $ hg clone --noupdate --updaterev 1 a ua
256 256 abort: cannot specify both --noupdate and --updaterev
257 257 [255]
258 258
259 259
260 260 Testing clone -u:
261 261
262 262 $ hg clone -u . a ua
263 263 updating to branch stable
264 264 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 265
266 266 Repo ua has both heads:
267 267
268 268 $ hg -R ua heads
269 269 changeset: 15:0aae7cf88f0d
270 270 branch: stable
271 271 tag: tip
272 272 user: test
273 273 date: Thu Jan 01 00:00:00 1970 +0000
274 274 summary: another change for branch stable
275 275
276 276 changeset: 12:f21241060d6a
277 277 user: test
278 278 date: Thu Jan 01 00:00:00 1970 +0000
279 279 summary: hacked default
280 280
281 281
282 282 Same revision checked out in repo a and ua:
283 283
284 284 $ hg -R a parents --template "{node|short}\n"
285 285 e8ece76546a6
286 286 $ hg -R ua parents --template "{node|short}\n"
287 287 e8ece76546a6
288 288
289 289 $ rm -r ua
290 290
291 291
292 292 Testing clone --pull -u:
293 293
294 294 $ hg clone --pull -u . a ua
295 295 requesting all changes
296 296 adding changesets
297 297 adding manifests
298 298 adding file changes
299 299 added 16 changesets with 16 changes to 3 files (+1 heads)
300 300 new changesets acb14030fe0a:0aae7cf88f0d
301 301 updating to branch stable
302 302 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
303 303
304 304 Repo ua has both heads:
305 305
306 306 $ hg -R ua heads
307 307 changeset: 15:0aae7cf88f0d
308 308 branch: stable
309 309 tag: tip
310 310 user: test
311 311 date: Thu Jan 01 00:00:00 1970 +0000
312 312 summary: another change for branch stable
313 313
314 314 changeset: 12:f21241060d6a
315 315 user: test
316 316 date: Thu Jan 01 00:00:00 1970 +0000
317 317 summary: hacked default
318 318
319 319
320 320 Same revision checked out in repo a and ua:
321 321
322 322 $ hg -R a parents --template "{node|short}\n"
323 323 e8ece76546a6
324 324 $ hg -R ua parents --template "{node|short}\n"
325 325 e8ece76546a6
326 326
327 327 $ rm -r ua
328 328
329 329
330 330 Testing clone -u <branch>:
331 331
332 332 $ hg clone -u stable a ua
333 333 updating to branch stable
334 334 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
335 335
336 336 Repo ua has both heads:
337 337
338 338 $ hg -R ua heads
339 339 changeset: 15:0aae7cf88f0d
340 340 branch: stable
341 341 tag: tip
342 342 user: test
343 343 date: Thu Jan 01 00:00:00 1970 +0000
344 344 summary: another change for branch stable
345 345
346 346 changeset: 12:f21241060d6a
347 347 user: test
348 348 date: Thu Jan 01 00:00:00 1970 +0000
349 349 summary: hacked default
350 350
351 351
352 352 Branch 'stable' is checked out:
353 353
354 354 $ hg -R ua parents
355 355 changeset: 15:0aae7cf88f0d
356 356 branch: stable
357 357 tag: tip
358 358 user: test
359 359 date: Thu Jan 01 00:00:00 1970 +0000
360 360 summary: another change for branch stable
361 361
362 362
363 363 $ rm -r ua
364 364
365 365
366 366 Testing default checkout:
367 367
368 368 $ hg clone a ua
369 369 updating to branch default
370 370 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
371 371
372 372 Repo ua has both heads:
373 373
374 374 $ hg -R ua heads
375 375 changeset: 15:0aae7cf88f0d
376 376 branch: stable
377 377 tag: tip
378 378 user: test
379 379 date: Thu Jan 01 00:00:00 1970 +0000
380 380 summary: another change for branch stable
381 381
382 382 changeset: 12:f21241060d6a
383 383 user: test
384 384 date: Thu Jan 01 00:00:00 1970 +0000
385 385 summary: hacked default
386 386
387 387
388 388 Branch 'default' is checked out:
389 389
390 390 $ hg -R ua parents
391 391 changeset: 12:f21241060d6a
392 392 user: test
393 393 date: Thu Jan 01 00:00:00 1970 +0000
394 394 summary: hacked default
395 395
396 396 Test clone with a branch named "@" (issue3677)
397 397
398 398 $ hg -R ua branch @
399 399 marked working directory as branch @
400 400 $ hg -R ua commit -m 'created branch @'
401 401 $ hg clone ua atbranch
402 402 updating to branch default
403 403 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
404 404 $ hg -R atbranch heads
405 405 changeset: 16:798b6d97153e
406 406 branch: @
407 407 tag: tip
408 408 parent: 12:f21241060d6a
409 409 user: test
410 410 date: Thu Jan 01 00:00:00 1970 +0000
411 411 summary: created branch @
412 412
413 413 changeset: 15:0aae7cf88f0d
414 414 branch: stable
415 415 user: test
416 416 date: Thu Jan 01 00:00:00 1970 +0000
417 417 summary: another change for branch stable
418 418
419 419 changeset: 12:f21241060d6a
420 420 user: test
421 421 date: Thu Jan 01 00:00:00 1970 +0000
422 422 summary: hacked default
423 423
424 424 $ hg -R atbranch parents
425 425 changeset: 12:f21241060d6a
426 426 user: test
427 427 date: Thu Jan 01 00:00:00 1970 +0000
428 428 summary: hacked default
429 429
430 430
431 431 $ rm -r ua atbranch
432 432
433 433
434 434 Testing #<branch>:
435 435
436 436 $ hg clone -u . a#stable ua
437 437 adding changesets
438 438 adding manifests
439 439 adding file changes
440 440 added 14 changesets with 14 changes to 3 files
441 441 new changesets acb14030fe0a:0aae7cf88f0d
442 442 updating to branch stable
443 443 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
444 444
445 445 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
446 446
447 447 $ hg -R ua heads
448 448 changeset: 13:0aae7cf88f0d
449 449 branch: stable
450 450 tag: tip
451 451 user: test
452 452 date: Thu Jan 01 00:00:00 1970 +0000
453 453 summary: another change for branch stable
454 454
455 455 changeset: 10:a7949464abda
456 456 user: test
457 457 date: Thu Jan 01 00:00:00 1970 +0000
458 458 summary: test
459 459
460 460
461 461 Same revision checked out in repo a and ua:
462 462
463 463 $ hg -R a parents --template "{node|short}\n"
464 464 e8ece76546a6
465 465 $ hg -R ua parents --template "{node|short}\n"
466 466 e8ece76546a6
467 467
468 468 $ rm -r ua
469 469
470 470
471 471 Testing -u -r <branch>:
472 472
473 473 $ hg clone -u . -r stable a ua
474 474 adding changesets
475 475 adding manifests
476 476 adding file changes
477 477 added 14 changesets with 14 changes to 3 files
478 478 new changesets acb14030fe0a:0aae7cf88f0d
479 479 updating to branch stable
480 480 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
481 481
482 482 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
483 483
484 484 $ hg -R ua heads
485 485 changeset: 13:0aae7cf88f0d
486 486 branch: stable
487 487 tag: tip
488 488 user: test
489 489 date: Thu Jan 01 00:00:00 1970 +0000
490 490 summary: another change for branch stable
491 491
492 492 changeset: 10:a7949464abda
493 493 user: test
494 494 date: Thu Jan 01 00:00:00 1970 +0000
495 495 summary: test
496 496
497 497
498 498 Same revision checked out in repo a and ua:
499 499
500 500 $ hg -R a parents --template "{node|short}\n"
501 501 e8ece76546a6
502 502 $ hg -R ua parents --template "{node|short}\n"
503 503 e8ece76546a6
504 504
505 505 $ rm -r ua
506 506
507 507
508 508 Testing -r <branch>:
509 509
510 510 $ hg clone -r stable a ua
511 511 adding changesets
512 512 adding manifests
513 513 adding file changes
514 514 added 14 changesets with 14 changes to 3 files
515 515 new changesets acb14030fe0a:0aae7cf88f0d
516 516 updating to branch stable
517 517 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
518 518
519 519 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
520 520
521 521 $ hg -R ua heads
522 522 changeset: 13:0aae7cf88f0d
523 523 branch: stable
524 524 tag: tip
525 525 user: test
526 526 date: Thu Jan 01 00:00:00 1970 +0000
527 527 summary: another change for branch stable
528 528
529 529 changeset: 10:a7949464abda
530 530 user: test
531 531 date: Thu Jan 01 00:00:00 1970 +0000
532 532 summary: test
533 533
534 534
535 535 Branch 'stable' is checked out:
536 536
537 537 $ hg -R ua parents
538 538 changeset: 13:0aae7cf88f0d
539 539 branch: stable
540 540 tag: tip
541 541 user: test
542 542 date: Thu Jan 01 00:00:00 1970 +0000
543 543 summary: another change for branch stable
544 544
545 545
546 546 $ rm -r ua
547 547
548 548
549 549 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
550 550 iterable in addbranchrevs()
551 551
552 552 $ cat <<EOF > simpleclone.py
553 553 > from mercurial import hg, ui as uimod
554 554 > myui = uimod.ui.load()
555 555 > repo = hg.repository(myui, b'a')
556 556 > hg.clone(myui, {}, repo, dest=b"ua")
557 557 > EOF
558 558
559 559 $ "$PYTHON" simpleclone.py
560 560 updating to branch default
561 561 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
562 562
563 563 $ rm -r ua
564 564
565 565 $ cat <<EOF > branchclone.py
566 566 > from mercurial import extensions, hg, ui as uimod
567 567 > myui = uimod.ui.load()
568 568 > extensions.loadall(myui)
569 569 > extensions.populateui(myui)
570 570 > repo = hg.repository(myui, b'a')
571 571 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable"])
572 572 > EOF
573 573
574 574 $ "$PYTHON" branchclone.py
575 575 adding changesets
576 576 adding manifests
577 577 adding file changes
578 578 added 14 changesets with 14 changes to 3 files
579 579 new changesets acb14030fe0a:0aae7cf88f0d
580 580 updating to branch stable
581 581 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
582 582 $ rm -r ua
583 583
584 584
585 585 Test clone with special '@' bookmark:
586 586 $ cd a
587 587 $ hg bookmark -r a7949464abda @ # branch point of stable from default
588 588 $ hg clone . ../i
589 589 updating to bookmark @
590 590 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
591 591 $ hg id -i ../i
592 592 a7949464abda
593 593 $ rm -r ../i
594 594
595 595 $ hg bookmark -f -r stable @
596 596 $ hg bookmarks
597 597 @ 15:0aae7cf88f0d
598 598 $ hg clone . ../i
599 599 updating to bookmark @ on branch stable
600 600 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
601 601 $ hg id -i ../i
602 602 0aae7cf88f0d
603 603 $ cd "$TESTTMP"
604 604
605 605
606 606 Testing failures:
607 607
608 608 $ mkdir fail
609 609 $ cd fail
610 610
611 611 No local source
612 612
613 613 $ hg clone a b
614 614 abort: repository a not found!
615 615 [255]
616 616
617 Invalid URL
618
619 $ hg clone http://invalid:url/a b
620 abort: error: nonnumeric port: 'url'
621 [255]
622
617 623 No remote source
618 624
619 625 #if windows
620 626 $ hg clone http://$LOCALIP:3121/a b
621 627 abort: error: * (glob)
622 628 [255]
623 629 #else
624 630 $ hg clone http://$LOCALIP:3121/a b
625 631 abort: error: *refused* (glob)
626 632 [255]
627 633 #endif
628 634 $ rm -rf b # work around bug with http clone
629 635
630 636
631 637 #if unix-permissions no-root
632 638
633 639 Inaccessible source
634 640
635 641 $ mkdir a
636 642 $ chmod 000 a
637 643 $ hg clone a b
638 644 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
639 645 [255]
640 646
641 647 Inaccessible destination
642 648
643 649 $ hg init b
644 650 $ cd b
645 651 $ hg clone . ../a
646 652 abort: Permission denied: *../a* (glob)
647 653 [255]
648 654 $ cd ..
649 655 $ chmod 700 a
650 656 $ rm -r a b
651 657
652 658 #endif
653 659
654 660
655 661 #if fifo
656 662
657 663 Source of wrong type
658 664
659 665 $ mkfifo a
660 666 $ hg clone a b
661 667 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
662 668 [255]
663 669 $ rm a
664 670
665 671 #endif
666 672
667 673 Default destination, same directory
668 674
669 675 $ hg init q
670 676 $ hg clone q
671 677 destination directory: q
672 678 abort: destination 'q' is not empty
673 679 [255]
674 680
675 681 destination directory not empty
676 682
677 683 $ mkdir a
678 684 $ echo stuff > a/a
679 685 $ hg clone q a
680 686 abort: destination 'a' is not empty
681 687 [255]
682 688
683 689
684 690 #if unix-permissions no-root
685 691
686 692 leave existing directory in place after clone failure
687 693
688 694 $ hg init c
689 695 $ cd c
690 696 $ echo c > c
691 697 $ hg commit -A -m test
692 698 adding c
693 699 $ chmod -rx .hg/store/data
694 700 $ cd ..
695 701 $ mkdir d
696 702 $ hg clone c d 2> err
697 703 [255]
698 704 $ test -d d
699 705 $ test -d d/.hg
700 706 [1]
701 707
702 708 re-enable perm to allow deletion
703 709
704 710 $ chmod +rx c/.hg/store/data
705 711
706 712 #endif
707 713
708 714 $ cd ..
709 715
710 716 Test clone from the repository in (emulated) revlog format 0 (issue4203):
711 717
712 718 $ mkdir issue4203
713 719 $ mkdir -p src/.hg
714 720 $ echo foo > src/foo
715 721 $ hg -R src add src/foo
716 722 $ hg -R src commit -m '#0'
717 723 $ hg -R src log -q
718 724 0:e1bab28bca43
719 725 $ hg -R src debugrevlog -c | egrep 'format|flags'
720 726 format : 0
721 727 flags : (none)
722 728 $ hg root -R src -T json | sed 's|\\\\|\\|g'
723 729 [
724 730 {
725 731 "hgpath": "$TESTTMP/src/.hg",
726 732 "reporoot": "$TESTTMP/src",
727 733 "storepath": "$TESTTMP/src/.hg"
728 734 }
729 735 ]
730 736 $ hg clone -U -q src dst
731 737 $ hg -R dst log -q
732 738 0:e1bab28bca43
733 739
734 740 Create repositories to test auto sharing functionality
735 741
736 742 $ cat >> $HGRCPATH << EOF
737 743 > [extensions]
738 744 > share=
739 745 > EOF
740 746
741 747 $ hg init empty
742 748 $ hg init source1a
743 749 $ cd source1a
744 750 $ echo initial1 > foo
745 751 $ hg -q commit -A -m initial
746 752 $ echo second > foo
747 753 $ hg commit -m second
748 754 $ cd ..
749 755
750 756 $ hg init filteredrev0
751 757 $ cd filteredrev0
752 758 $ cat >> .hg/hgrc << EOF
753 759 > [experimental]
754 760 > evolution.createmarkers=True
755 761 > EOF
756 762 $ echo initial1 > foo
757 763 $ hg -q commit -A -m initial0
758 764 $ hg -q up -r null
759 765 $ echo initial2 > foo
760 766 $ hg -q commit -A -m initial1
761 767 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
762 768 1 new obsolescence markers
763 769 obsoleted 1 changesets
764 770 $ cd ..
765 771
766 772 $ hg -q clone --pull source1a source1b
767 773 $ cd source1a
768 774 $ hg bookmark bookA
769 775 $ echo 1a > foo
770 776 $ hg commit -m 1a
771 777 $ cd ../source1b
772 778 $ hg -q up -r 0
773 779 $ echo head1 > foo
774 780 $ hg commit -m head1
775 781 created new head
776 782 $ hg bookmark head1
777 783 $ hg -q up -r 0
778 784 $ echo head2 > foo
779 785 $ hg commit -m head2
780 786 created new head
781 787 $ hg bookmark head2
782 788 $ hg -q up -r 0
783 789 $ hg branch branch1
784 790 marked working directory as branch branch1
785 791 (branches are permanent and global, did you want a bookmark?)
786 792 $ echo branch1 > foo
787 793 $ hg commit -m branch1
788 794 $ hg -q up -r 0
789 795 $ hg branch branch2
790 796 marked working directory as branch branch2
791 797 $ echo branch2 > foo
792 798 $ hg commit -m branch2
793 799 $ cd ..
794 800 $ hg init source2
795 801 $ cd source2
796 802 $ echo initial2 > foo
797 803 $ hg -q commit -A -m initial2
798 804 $ echo second > foo
799 805 $ hg commit -m second
800 806 $ cd ..
801 807
802 808 Clone with auto share from an empty repo should not result in share
803 809
804 810 $ mkdir share
805 811 $ hg --config share.pool=share clone empty share-empty
806 812 (not using pooled storage: remote appears to be empty)
807 813 updating to branch default
808 814 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
809 815 $ ls share
810 816 $ test -d share-empty/.hg/store
811 817 $ test -f share-empty/.hg/sharedpath
812 818 [1]
813 819
814 820 Clone with auto share from a repo with filtered revision 0 should not result in share
815 821
816 822 $ hg --config share.pool=share clone filteredrev0 share-filtered
817 823 (not using pooled storage: unable to resolve identity of remote)
818 824 requesting all changes
819 825 adding changesets
820 826 adding manifests
821 827 adding file changes
822 828 added 1 changesets with 1 changes to 1 files
823 829 new changesets e082c1832e09
824 830 updating to branch default
825 831 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
826 832
827 833 Clone from repo with content should result in shared store being created
828 834
829 835 $ hg --config share.pool=share clone source1a share-dest1a
830 836 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
831 837 requesting all changes
832 838 adding changesets
833 839 adding manifests
834 840 adding file changes
835 841 added 3 changesets with 3 changes to 1 files
836 842 new changesets b5f04eac9d8f:e5bfe23c0b47
837 843 searching for changes
838 844 no changes found
839 845 adding remote bookmark bookA
840 846 updating working directory
841 847 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
842 848
843 849 The shared repo should have been created
844 850
845 851 $ ls share
846 852 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
847 853
848 854 The destination should point to it
849 855
850 856 $ cat share-dest1a/.hg/sharedpath; echo
851 857 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
852 858
853 859 The destination should have bookmarks
854 860
855 861 $ hg -R share-dest1a bookmarks
856 862 bookA 2:e5bfe23c0b47
857 863
858 864 The default path should be the remote, not the share
859 865
860 866 $ hg -R share-dest1a config paths.default
861 867 $TESTTMP/source1a
862 868
863 869 Clone with existing share dir should result in pull + share
864 870
865 871 $ hg --config share.pool=share clone source1b share-dest1b
866 872 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
867 873 searching for changes
868 874 adding changesets
869 875 adding manifests
870 876 adding file changes
871 877 adding remote bookmark head1
872 878 adding remote bookmark head2
873 879 added 4 changesets with 4 changes to 1 files (+4 heads)
874 880 new changesets 4a8dc1ab4c13:6bacf4683960
875 881 updating working directory
876 882 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
877 883
878 884 $ ls share
879 885 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
880 886
881 887 $ cat share-dest1b/.hg/sharedpath; echo
882 888 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
883 889
884 890 We only get bookmarks from the remote, not everything in the share
885 891
886 892 $ hg -R share-dest1b bookmarks
887 893 head1 3:4a8dc1ab4c13
888 894 head2 4:99f71071f117
889 895
890 896 Default path should be source, not share.
891 897
892 898 $ hg -R share-dest1b config paths.default
893 899 $TESTTMP/source1b
894 900
895 901 Checked out revision should be head of default branch
896 902
897 903 $ hg -R share-dest1b log -r .
898 904 changeset: 4:99f71071f117
899 905 bookmark: head2
900 906 parent: 0:b5f04eac9d8f
901 907 user: test
902 908 date: Thu Jan 01 00:00:00 1970 +0000
903 909 summary: head2
904 910
905 911
906 912 Clone from unrelated repo should result in new share
907 913
908 914 $ hg --config share.pool=share clone source2 share-dest2
909 915 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
910 916 requesting all changes
911 917 adding changesets
912 918 adding manifests
913 919 adding file changes
914 920 added 2 changesets with 2 changes to 1 files
915 921 new changesets 22aeff664783:63cf6c3dba4a
916 922 searching for changes
917 923 no changes found
918 924 updating working directory
919 925 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
920 926
921 927 $ ls share
922 928 22aeff664783fd44c6d9b435618173c118c3448e
923 929 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
924 930
925 931 remote naming mode works as advertised
926 932
927 933 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
928 934 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
929 935 requesting all changes
930 936 adding changesets
931 937 adding manifests
932 938 adding file changes
933 939 added 3 changesets with 3 changes to 1 files
934 940 new changesets b5f04eac9d8f:e5bfe23c0b47
935 941 searching for changes
936 942 no changes found
937 943 adding remote bookmark bookA
938 944 updating working directory
939 945 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
940 946
941 947 $ ls shareremote
942 948 195bb1fcdb595c14a6c13e0269129ed78f6debde
943 949
944 950 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
945 951 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
946 952 requesting all changes
947 953 adding changesets
948 954 adding manifests
949 955 adding file changes
950 956 added 6 changesets with 6 changes to 1 files (+4 heads)
951 957 new changesets b5f04eac9d8f:6bacf4683960
952 958 searching for changes
953 959 no changes found
954 960 adding remote bookmark head1
955 961 adding remote bookmark head2
956 962 updating working directory
957 963 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
958 964
959 965 $ ls shareremote
960 966 195bb1fcdb595c14a6c13e0269129ed78f6debde
961 967 c0d4f83847ca2a873741feb7048a45085fd47c46
962 968
963 969 request to clone a single revision is respected in sharing mode
964 970
965 971 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
966 972 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
967 973 adding changesets
968 974 adding manifests
969 975 adding file changes
970 976 added 2 changesets with 2 changes to 1 files
971 977 new changesets b5f04eac9d8f:4a8dc1ab4c13
972 978 no changes found
973 979 adding remote bookmark head1
974 980 updating working directory
975 981 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
976 982
977 983 $ hg -R share-1arev log -G
978 984 @ changeset: 1:4a8dc1ab4c13
979 985 | bookmark: head1
980 986 | tag: tip
981 987 | user: test
982 988 | date: Thu Jan 01 00:00:00 1970 +0000
983 989 | summary: head1
984 990 |
985 991 o changeset: 0:b5f04eac9d8f
986 992 user: test
987 993 date: Thu Jan 01 00:00:00 1970 +0000
988 994 summary: initial
989 995
990 996
991 997 making another clone should only pull down requested rev
992 998
993 999 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
994 1000 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
995 1001 searching for changes
996 1002 adding changesets
997 1003 adding manifests
998 1004 adding file changes
999 1005 adding remote bookmark head1
1000 1006 adding remote bookmark head2
1001 1007 added 1 changesets with 1 changes to 1 files (+1 heads)
1002 1008 new changesets 99f71071f117
1003 1009 updating working directory
1004 1010 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1005 1011
1006 1012 $ hg -R share-1brev log -G
1007 1013 @ changeset: 2:99f71071f117
1008 1014 | bookmark: head2
1009 1015 | tag: tip
1010 1016 | parent: 0:b5f04eac9d8f
1011 1017 | user: test
1012 1018 | date: Thu Jan 01 00:00:00 1970 +0000
1013 1019 | summary: head2
1014 1020 |
1015 1021 | o changeset: 1:4a8dc1ab4c13
1016 1022 |/ bookmark: head1
1017 1023 | user: test
1018 1024 | date: Thu Jan 01 00:00:00 1970 +0000
1019 1025 | summary: head1
1020 1026 |
1021 1027 o changeset: 0:b5f04eac9d8f
1022 1028 user: test
1023 1029 date: Thu Jan 01 00:00:00 1970 +0000
1024 1030 summary: initial
1025 1031
1026 1032
1027 1033 Request to clone a single branch is respected in sharing mode
1028 1034
1029 1035 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1030 1036 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1031 1037 adding changesets
1032 1038 adding manifests
1033 1039 adding file changes
1034 1040 added 2 changesets with 2 changes to 1 files
1035 1041 new changesets b5f04eac9d8f:5f92a6c1a1b1
1036 1042 no changes found
1037 1043 updating working directory
1038 1044 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1039 1045
1040 1046 $ hg -R share-1bbranch1 log -G
1041 1047 o changeset: 1:5f92a6c1a1b1
1042 1048 | branch: branch1
1043 1049 | tag: tip
1044 1050 | user: test
1045 1051 | date: Thu Jan 01 00:00:00 1970 +0000
1046 1052 | summary: branch1
1047 1053 |
1048 1054 @ changeset: 0:b5f04eac9d8f
1049 1055 user: test
1050 1056 date: Thu Jan 01 00:00:00 1970 +0000
1051 1057 summary: initial
1052 1058
1053 1059
1054 1060 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1055 1061 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1056 1062 searching for changes
1057 1063 adding changesets
1058 1064 adding manifests
1059 1065 adding file changes
1060 1066 added 1 changesets with 1 changes to 1 files (+1 heads)
1061 1067 new changesets 6bacf4683960
1062 1068 updating working directory
1063 1069 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1064 1070
1065 1071 $ hg -R share-1bbranch2 log -G
1066 1072 o changeset: 2:6bacf4683960
1067 1073 | branch: branch2
1068 1074 | tag: tip
1069 1075 | parent: 0:b5f04eac9d8f
1070 1076 | user: test
1071 1077 | date: Thu Jan 01 00:00:00 1970 +0000
1072 1078 | summary: branch2
1073 1079 |
1074 1080 | o changeset: 1:5f92a6c1a1b1
1075 1081 |/ branch: branch1
1076 1082 | user: test
1077 1083 | date: Thu Jan 01 00:00:00 1970 +0000
1078 1084 | summary: branch1
1079 1085 |
1080 1086 @ changeset: 0:b5f04eac9d8f
1081 1087 user: test
1082 1088 date: Thu Jan 01 00:00:00 1970 +0000
1083 1089 summary: initial
1084 1090
1085 1091
1086 1092 -U is respected in share clone mode
1087 1093
1088 1094 $ hg --config share.pool=share clone -U source1a share-1anowc
1089 1095 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1090 1096 searching for changes
1091 1097 no changes found
1092 1098 adding remote bookmark bookA
1093 1099
1094 1100 $ ls share-1anowc
1095 1101
1096 1102 Test that auto sharing doesn't cause failure of "hg clone local remote"
1097 1103
1098 1104 $ cd $TESTTMP
1099 1105 $ hg -R a id -r 0
1100 1106 acb14030fe0a
1101 1107 $ hg id -R remote -r 0
1102 1108 abort: repository remote not found!
1103 1109 [255]
1104 1110 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1105 1111 $ hg -R remote id -r 0
1106 1112 acb14030fe0a
1107 1113
1108 1114 Cloning into pooled storage doesn't race (issue5104)
1109 1115
1110 1116 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1111 1117 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1112 1118 $ wait
1113 1119
1114 1120 $ hg -R share-destrace1 log -r tip
1115 1121 changeset: 2:e5bfe23c0b47
1116 1122 bookmark: bookA
1117 1123 tag: tip
1118 1124 user: test
1119 1125 date: Thu Jan 01 00:00:00 1970 +0000
1120 1126 summary: 1a
1121 1127
1122 1128
1123 1129 $ hg -R share-destrace2 log -r tip
1124 1130 changeset: 2:e5bfe23c0b47
1125 1131 bookmark: bookA
1126 1132 tag: tip
1127 1133 user: test
1128 1134 date: Thu Jan 01 00:00:00 1970 +0000
1129 1135 summary: 1a
1130 1136
1131 1137 One repo should be new, the other should be shared from the pool. We
1132 1138 don't care which is which, so we just make sure we always print the
1133 1139 one containing "new pooled" first, then the one containing "existing
1134 1140 pooled".
1135 1141
1136 1142 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1137 1143 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1138 1144 requesting all changes
1139 1145 adding changesets
1140 1146 adding manifests
1141 1147 adding file changes
1142 1148 added 3 changesets with 3 changes to 1 files
1143 1149 new changesets b5f04eac9d8f:e5bfe23c0b47
1144 1150 searching for changes
1145 1151 no changes found
1146 1152 adding remote bookmark bookA
1147 1153 updating working directory
1148 1154 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1149 1155
1150 1156 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1151 1157 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1152 1158 searching for changes
1153 1159 no changes found
1154 1160 adding remote bookmark bookA
1155 1161 updating working directory
1156 1162 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1157 1163
1158 1164 SEC: check for unsafe ssh url
1159 1165
1160 1166 $ cat >> $HGRCPATH << EOF
1161 1167 > [ui]
1162 1168 > ssh = sh -c "read l; read l; read l"
1163 1169 > EOF
1164 1170
1165 1171 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1166 1172 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1167 1173 [255]
1168 1174 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1169 1175 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1170 1176 [255]
1171 1177 $ hg clone 'ssh://fakehost|touch%20owned/path'
1172 1178 abort: no suitable response from remote hg!
1173 1179 [255]
1174 1180 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1175 1181 abort: no suitable response from remote hg!
1176 1182 [255]
1177 1183
1178 1184 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1179 1185 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1180 1186 [255]
1181 1187
1182 1188 #if windows
1183 1189 $ hg clone "ssh://%26touch%20owned%20/" --debug
1184 1190 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1185 1191 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1186 1192 sending hello command
1187 1193 sending between command
1188 1194 abort: no suitable response from remote hg!
1189 1195 [255]
1190 1196 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1191 1197 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1192 1198 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1193 1199 sending hello command
1194 1200 sending between command
1195 1201 abort: no suitable response from remote hg!
1196 1202 [255]
1197 1203 #else
1198 1204 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1199 1205 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1200 1206 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1201 1207 sending hello command
1202 1208 sending between command
1203 1209 abort: no suitable response from remote hg!
1204 1210 [255]
1205 1211 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1206 1212 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1207 1213 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1208 1214 sending hello command
1209 1215 sending between command
1210 1216 abort: no suitable response from remote hg!
1211 1217 [255]
1212 1218 #endif
1213 1219
1214 1220 $ hg clone "ssh://v-alid.example.com/" --debug
1215 1221 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1216 1222 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1217 1223 sending hello command
1218 1224 sending between command
1219 1225 abort: no suitable response from remote hg!
1220 1226 [255]
1221 1227
1222 1228 We should not have created a file named owned - if it exists, the
1223 1229 attack succeeded.
1224 1230 $ if test -f owned; then echo 'you got owned'; fi
1225 1231
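The "potentially unsafe url" aborts above guard against ssh option injection: a
user, host, or port beginning with '-' could otherwise be parsed by the ssh client
as a command-line option such as -oProxyCommand. A standalone sketch of that kind
of check (looks_unsafe is an invented name; Mercurial's actual validation lives in
its URL handling code and may cover more cases):

  def looks_unsafe(user, host, port):
      # Reject any component an ssh client could mistake for an option.
      for part in (user, host, port):
          if part and part.startswith('-'):
              return True
      return False

  assert looks_unsafe(None, '-oProxyCommand=touch${IFS}owned', None)
  assert not looks_unsafe('user', 'v-alid.example.com', None)
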
1226 1232 Cloning without fsmonitor enabled does not print a warning for small repos
1227 1233
1228 1234 $ hg clone a fsmonitor-default
1229 1235 updating to bookmark @ on branch stable
1230 1236 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1231 1237
1232 1238 Lower the warning threshold to simulate a large repo
1233 1239
1234 1240 $ cat >> $HGRCPATH << EOF
1235 1241 > [fsmonitor]
1236 1242 > warn_update_file_count = 2
1237 1243 > EOF
1238 1244
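With fsmonitor.warn_update_file_count lowered to 2, any checkout touching at least
that many files counts as "large". The cases below can be read as exercising a rule
roughly like the following sketch (should_warn is a made-up name; the real decision
is made inside the fsmonitor extension and is more involved):

  def should_warn(updated_file_count, threshold=2,
                  fsmonitor_enabled=False, warn_when_unused=True):
      # No hint if fsmonitor is already active or the warning is disabled.
      if fsmonitor_enabled or not warn_when_unused:
          return False
      return updated_file_count >= threshold

  # The 3-file checkouts below exceed the lowered threshold of 2.
  assert should_warn(3)
  assert not should_warn(3, fsmonitor_enabled=True)
  assert not should_warn(1)
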
1239 1245 We should see a warning about fsmonitor not being enabled on supported platforms
1240 1246
1241 1247 #if linuxormacos no-fsmonitor
1242 1248 $ hg clone a nofsmonitor
1243 1249 updating to bookmark @ on branch stable
1244 1250 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1245 1251 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1246 1252 #else
1247 1253 $ hg clone a nofsmonitor
1248 1254 updating to bookmark @ on branch stable
1249 1255 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1250 1256 #endif
1251 1257
1252 1258 We should not see a warning about fsmonitor when it is enabled
1253 1259
1254 1260 #if fsmonitor
1255 1261 $ hg clone a fsmonitor-enabled
1256 1262 updating to bookmark @ on branch stable
1257 1263 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1258 1264 #endif
1259 1265
1260 1266 We can disable the fsmonitor warning
1261 1267
1262 1268 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1263 1269 updating to bookmark @ on branch stable
1264 1270 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1265 1271
1266 1272 An fsmonitor that is loaded but disabled in the config should still print the warning
1267 1273
1268 1274 #if linuxormacos fsmonitor
1269 1275 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1270 1276 updating to bookmark @ on branch stable
1271 1277 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1272 1278 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1273 1279 #endif
1274 1280
1275 1281 Warning not printed if working directory isn't empty
1276 1282
1277 1283 $ hg -q clone a fsmonitor-update
1278 1284 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1279 1285 $ cd fsmonitor-update
1280 1286 $ hg up acb14030fe0a
1281 1287 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1282 1288 (leaving bookmark @)
1283 1289 $ hg up cf0fe1914066
1284 1290 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1285 1291
1286 1292 `hg update` from the null revision also prints the warning
1287 1293
1288 1294 $ hg up null
1289 1295 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1290 1296
1291 1297 #if linuxormacos no-fsmonitor
1292 1298 $ hg up cf0fe1914066
1293 1299 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1294 1300 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1295 1301 #else
1296 1302 $ hg up cf0fe1914066
1297 1303 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1298 1304 #endif
1299 1305
1300 1306 $ cd ..
1301 1307