errors: use InputError for errors about bad paths...
Martin von Zweigbergk
r46448:3175b0e0 default
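This changeset switches the bad-path errors raised by checkfilename() and checkportable() below from error.Abort to error.InputError (new-file lines 314 and 329). A minimal sketch of the intended effect, assuming a configured ui object; the error class names are the ones used in this file:

    from mercurial import error, scmutil

    try:
        scmutil.checkfilename(b'evil\nname')
    except error.InputError as inst:
        # Callers that previously caught error.Abort keep working, assuming
        # InputError subclasses Abort as the isinstance() check in
        # callcatch() below suggests; the detailed exit code becomes 10.
        ui.warn(b'invalid filename: %s\n' % inst.message)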
@@ -1,2308 +1,2308 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28 from .pycompat import getattr
29 29 from .thirdparty import attr
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 41 requirements as requirementsmod,
42 42 revsetlang,
43 43 similar,
44 44 smartset,
45 45 url,
46 46 util,
47 47 vfs,
48 48 )
49 49
50 50 from .utils import (
51 51 hashutil,
52 52 procutil,
53 53 stringutil,
54 54 )
55 55
56 56 if pycompat.iswindows:
57 57 from . import scmwindows as scmplatform
58 58 else:
59 59 from . import scmposix as scmplatform
60 60
61 61 parsers = policy.importmod('parsers')
62 62 rustrevlog = policy.importrust('revlog')
63 63
64 64 termsize = scmplatform.termsize
65 65
66 66
67 67 @attr.s(slots=True, repr=False)
68 68 class status(object):
69 69 '''Struct with a list of files per status.
70 70
71 71 The 'deleted', 'unknown' and 'ignored' properties are only
72 72 relevant to the working copy.
73 73 '''
74 74
75 75 modified = attr.ib(default=attr.Factory(list))
76 76 added = attr.ib(default=attr.Factory(list))
77 77 removed = attr.ib(default=attr.Factory(list))
78 78 deleted = attr.ib(default=attr.Factory(list))
79 79 unknown = attr.ib(default=attr.Factory(list))
80 80 ignored = attr.ib(default=attr.Factory(list))
81 81 clean = attr.ib(default=attr.Factory(list))
82 82
83 83 def __iter__(self):
84 84 yield self.modified
85 85 yield self.added
86 86 yield self.removed
87 87 yield self.deleted
88 88 yield self.unknown
89 89 yield self.ignored
90 90 yield self.clean
91 91
92 92 def __repr__(self):
93 93 return (
94 94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 95 r'unknown=%s, ignored=%s, clean=%s>'
96 96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97 97
98 98
99 99 def itersubrepos(ctx1, ctx2):
100 100 """find subrepos in ctx1 or ctx2"""
101 101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106 106
107 107 missing = set()
108 108
109 109 for subpath in ctx2.substate:
110 110 if subpath not in ctx1.substate:
111 111 del subpaths[subpath]
112 112 missing.add(subpath)
113 113
114 114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 115 yield subpath, ctx.sub(subpath)
116 116
117 117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 118 # status and diff will have an accurate result when it does
119 119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 120 # against itself.
121 121 for subpath in missing:
122 122 yield subpath, ctx2.nullsub(subpath, ctx1)
123 123
124 124
125 125 def nochangesfound(ui, repo, excluded=None):
126 126 '''Report no changes for push/pull. excluded is None or a list of
127 127 nodes excluded from the push/pull.
128 128 '''
129 129 secretlist = []
130 130 if excluded:
131 131 for n in excluded:
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(
138 138 _(b"no changes found (ignored %d secret changesets)\n")
139 139 % len(secretlist)
140 140 )
141 141 else:
142 142 ui.status(_(b"no changes found\n"))
143 143
144 144
145 145 def callcatch(ui, func):
146 146 """call func() with global exception handling
147 147
148 148 return func() if no exception happens. otherwise do some error handling
149 149 and return an exit code accordingly. does not handle all exceptions.
150 150 """
151 151 coarse_exit_code = -1
152 152 detailed_exit_code = -1
153 153 try:
154 154 try:
155 155 return func()
156 156 except: # re-raises
157 157 ui.traceback()
158 158 raise
159 159 # Global exception handling, alphabetically
160 160 # Mercurial-specific first, followed by built-in and library exceptions
161 161 except error.LockHeld as inst:
162 162 detailed_exit_code = 20
163 163 if inst.errno == errno.ETIMEDOUT:
164 164 reason = _(b'timed out waiting for lock held by %r') % (
165 165 pycompat.bytestr(inst.locker)
166 166 )
167 167 else:
168 168 reason = _(b'lock held by %r') % inst.locker
169 169 ui.error(
170 170 _(b"abort: %s: %s\n")
171 171 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
172 172 )
173 173 if not inst.locker:
174 174 ui.error(_(b"(lock might be very busy)\n"))
175 175 except error.LockUnavailable as inst:
176 176 detailed_exit_code = 20
177 177 ui.error(
178 178 _(b"abort: could not lock %s: %s\n")
179 179 % (
180 180 inst.desc or stringutil.forcebytestr(inst.filename),
181 181 encoding.strtolocal(inst.strerror),
182 182 )
183 183 )
184 184 except error.OutOfBandError as inst:
185 185 detailed_exit_code = 100
186 186 if inst.args:
187 187 msg = _(b"abort: remote error:\n")
188 188 else:
189 189 msg = _(b"abort: remote error\n")
190 190 ui.error(msg)
191 191 if inst.args:
192 192 ui.error(b''.join(inst.args))
193 193 if inst.hint:
194 194 ui.error(b'(%s)\n' % inst.hint)
195 195 except error.RepoError as inst:
196 196 ui.error(_(b"abort: %s!\n") % inst)
197 197 if inst.hint:
198 198 ui.error(_(b"(%s)\n") % inst.hint)
199 199 except error.ResponseError as inst:
200 200 ui.error(_(b"abort: %s") % inst.args[0])
201 201 msg = inst.args[1]
202 202 if isinstance(msg, type(u'')):
203 203 msg = pycompat.sysbytes(msg)
204 204 if not isinstance(msg, bytes):
205 205 ui.error(b" %r\n" % (msg,))
206 206 elif not msg:
207 207 ui.error(_(b" empty string\n"))
208 208 else:
209 209 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
210 210 except error.CensoredNodeError as inst:
211 211 ui.error(_(b"abort: file censored %s!\n") % inst)
212 212 except error.StorageError as inst:
213 213 ui.error(_(b"abort: %s!\n") % inst)
214 214 if inst.hint:
215 215 ui.error(_(b"(%s)\n") % inst.hint)
216 216 except error.InterventionRequired as inst:
217 217 ui.error(b"%s\n" % inst)
218 218 if inst.hint:
219 219 ui.error(_(b"(%s)\n") % inst.hint)
220 220 detailed_exit_code = 240
221 221 coarse_exit_code = 1
222 222 except error.WdirUnsupported:
223 223 ui.error(_(b"abort: working directory revision cannot be specified\n"))
224 224 except error.Abort as inst:
225 225 if isinstance(inst, error.InputError):
226 226 detailed_exit_code = 10
227 227 elif isinstance(inst, error.StateError):
228 228 detailed_exit_code = 20
229 229 elif isinstance(inst, error.ConfigError):
230 230 detailed_exit_code = 30
231 231 ui.error(_(b"abort: %s\n") % inst.message)
232 232 if inst.hint:
233 233 ui.error(_(b"(%s)\n") % inst.hint)
234 234 except error.WorkerError as inst:
235 235 # Don't print a message -- the worker already should have
236 236 return inst.status_code
237 237 except ImportError as inst:
238 238 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
239 239 m = stringutil.forcebytestr(inst).split()[-1]
240 240 if m in b"mpatch bdiff".split():
241 241 ui.error(_(b"(did you forget to compile extensions?)\n"))
242 242 elif m in b"zlib".split():
243 243 ui.error(_(b"(is your Python install correct?)\n"))
244 244 except util.urlerr.httperror as inst:
245 245 detailed_exit_code = 100
246 246 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
247 247 except util.urlerr.urlerror as inst:
248 248 detailed_exit_code = 100
249 249 try: # usually it is in the form (errno, strerror)
250 250 reason = inst.reason.args[1]
251 251 except (AttributeError, IndexError):
252 252 # it might be anything, for example a string
253 253 reason = inst.reason
254 254 if isinstance(reason, pycompat.unicode):
255 255 # SSLError of Python 2.7.9 contains a unicode
256 256 reason = encoding.unitolocal(reason)
257 257 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
258 258 except (IOError, OSError) as inst:
259 259 if (
260 260 util.safehasattr(inst, b"args")
261 261 and inst.args
262 262 and inst.args[0] == errno.EPIPE
263 263 ):
264 264 pass
265 265 elif getattr(inst, "strerror", None): # common IOError or OSError
266 266 if getattr(inst, "filename", None) is not None:
267 267 ui.error(
268 268 _(b"abort: %s: '%s'\n")
269 269 % (
270 270 encoding.strtolocal(inst.strerror),
271 271 stringutil.forcebytestr(inst.filename),
272 272 )
273 273 )
274 274 else:
275 275 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
276 276 else: # suspicious IOError
277 277 raise
278 278 except MemoryError:
279 279 ui.error(_(b"abort: out of memory\n"))
280 280 except SystemExit as inst:
281 281 # Commands shouldn't sys.exit directly, but give a return code.
282 282 # Just in case, catch this and pass the exit code to the caller.
283 283 detailed_exit_code = 254
284 284 coarse_exit_code = inst.code
285 285
286 286 if ui.configbool(b'ui', b'detailed-exit-code'):
287 287 return detailed_exit_code
288 288 else:
289 289 return coarse_exit_code
290 290
291 291
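callcatch() above converts exceptions from func() into an exit code; which of the two codes is returned depends on ui.detailed-exit-code. A hedged usage sketch, assuming a ui object and that the error classes are Abort subclasses as the isinstance() checks imply:

    from mercurial import error, scmutil

    def run():
        raise error.StateError(b'uncommitted changes')   # hypothetical failure

    code = scmutil.callcatch(ui, run)
    # ui.detailed-exit-code enabled  -> 20 (StateError branch above)
    # ui.detailed-exit-code disabled -> the coarse code, left at -1 here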
292 292 def checknewlabel(repo, lbl, kind):
293 293 # Do not use the "kind" parameter in ui output.
294 294 # It makes strings difficult to translate.
295 295 if lbl in [b'tip', b'.', b'null']:
296 296 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
297 297 for c in (b':', b'\0', b'\n', b'\r'):
298 298 if c in lbl:
299 299 raise error.Abort(
300 300 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
301 301 )
302 302 try:
303 303 int(lbl)
304 304 raise error.Abort(_(b"cannot use an integer as a name"))
305 305 except ValueError:
306 306 pass
307 307 if lbl.strip() != lbl:
308 308 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
309 309
310 310
311 311 def checkfilename(f):
312 312 '''Check that the filename f is an acceptable filename for a tracked file'''
313 313 if b'\r' in f or b'\n' in f:
314 raise error.Abort(
314 raise error.InputError(
315 315 _(b"'\\n' and '\\r' disallowed in filenames: %r")
316 316 % pycompat.bytestr(f)
317 317 )
318 318
319 319
320 320 def checkportable(ui, f):
321 321 '''Check if filename f is portable and warn or abort depending on config'''
322 322 checkfilename(f)
323 323 abort, warn = checkportabilityalert(ui)
324 324 if abort or warn:
325 325 msg = util.checkwinfilename(f)
326 326 if msg:
327 327 msg = b"%s: %s" % (msg, procutil.shellquote(f))
328 328 if abort:
329 raise error.Abort(msg)
329 raise error.InputError(msg)
330 330 ui.warn(_(b"warning: %s\n") % msg)
331 331
332 332
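checkportable() first runs checkfilename() and then, depending on the ui.portablefilenames setting resolved by checkportabilityalert() below, either aborts on or merely warns about Windows-unsafe names. Sketch, assuming a ui configured with ui.portablefilenames=abort:

    from mercurial import error, scmutil

    try:
        scmutil.checkportable(ui, b'aux')   # reserved base name on Windows
    except error.InputError as inst:
        ui.warn(b'cannot add: %s\n' % inst.message)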
333 333 def checkportabilityalert(ui):
334 334 '''check if the user's config requests nothing, a warning, or abort for
335 335 non-portable filenames'''
336 336 val = ui.config(b'ui', b'portablefilenames')
337 337 lval = val.lower()
338 338 bval = stringutil.parsebool(val)
339 339 abort = pycompat.iswindows or lval == b'abort'
340 340 warn = bval or lval == b'warn'
341 341 if bval is None and not (warn or abort or lval == b'ignore'):
342 342 raise error.ConfigError(
343 343 _(b"ui.portablefilenames value is invalid ('%s')") % val
344 344 )
345 345 return abort, warn
346 346
347 347
348 348 class casecollisionauditor(object):
349 349 def __init__(self, ui, abort, dirstate):
350 350 self._ui = ui
351 351 self._abort = abort
352 352 allfiles = b'\0'.join(dirstate)
353 353 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
354 354 self._dirstate = dirstate
355 355 # The purpose of _newfiles is so that we don't complain about
356 356 # case collisions if someone were to call this object with the
357 357 # same filename twice.
358 358 self._newfiles = set()
359 359
360 360 def __call__(self, f):
361 361 if f in self._newfiles:
362 362 return
363 363 fl = encoding.lower(f)
364 364 if fl in self._loweredfiles and f not in self._dirstate:
365 365 msg = _(b'possible case-folding collision for %s') % f
366 366 if self._abort:
367 367 raise error.Abort(msg)
368 368 self._ui.warn(_(b"warning: %s\n") % msg)
369 369 self._loweredfiles.add(fl)
370 370 self._newfiles.add(f)
371 371
372 372
373 373 def filteredhash(repo, maxrev):
374 374 """build hash of filtered revisions in the current repoview.
375 375
376 376 Multiple caches perform up-to-date validation by checking that the
377 377 tiprev and tipnode stored in the cache file match the current repository.
378 378 However, this is not sufficient for validating repoviews because the set
379 379 of revisions in the view may change without the repository tiprev and
380 380 tipnode changing.
381 381
382 382 This function hashes all the revs filtered from the view and returns
383 383 that SHA-1 digest.
384 384 """
385 385 cl = repo.changelog
386 386 if not cl.filteredrevs:
387 387 return None
388 388 key = cl._filteredrevs_hashcache.get(maxrev)
389 389 if not key:
390 390 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
391 391 if revs:
392 392 s = hashutil.sha1()
393 393 for rev in revs:
394 394 s.update(b'%d;' % rev)
395 395 key = s.digest()
396 396 cl._filteredrevs_hashcache[maxrev] = key
397 397 return key
398 398
399 399
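filteredhash() condenses the set of filtered revisions (up to maxrev) into a SHA-1 key so caches can tell that the repoview changed even though tiprev and tipnode did not. The same hashing scheme, restated on a made-up revision set:

    from mercurial.utils import hashutil

    filtered = {3, 5, 8}              # hypothetical hidden revisions
    s = hashutil.sha1()
    for rev in sorted(filtered):
        s.update(b'%d;' % rev)        # same b'%d;' tokens as above
    key = s.digest()                  # what filteredhash() caches and returns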
400 400 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
401 401 '''yield every hg repository under path, always recursively.
402 402 The recurse flag will only control recursion into repo working dirs'''
403 403
404 404 def errhandler(err):
405 405 if err.filename == path:
406 406 raise err
407 407
408 408 samestat = getattr(os.path, 'samestat', None)
409 409 if followsym and samestat is not None:
410 410
411 411 def adddir(dirlst, dirname):
412 412 dirstat = os.stat(dirname)
413 413 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
414 414 if not match:
415 415 dirlst.append(dirstat)
416 416 return not match
417 417
418 418 else:
419 419 followsym = False
420 420
421 421 if (seen_dirs is None) and followsym:
422 422 seen_dirs = []
423 423 adddir(seen_dirs, path)
424 424 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
425 425 dirs.sort()
426 426 if b'.hg' in dirs:
427 427 yield root # found a repository
428 428 qroot = os.path.join(root, b'.hg', b'patches')
429 429 if os.path.isdir(os.path.join(qroot, b'.hg')):
430 430 yield qroot # we have a patch queue repo here
431 431 if recurse:
432 432 # avoid recursing inside the .hg directory
433 433 dirs.remove(b'.hg')
434 434 else:
435 435 dirs[:] = [] # don't descend further
436 436 elif followsym:
437 437 newdirs = []
438 438 for d in dirs:
439 439 fname = os.path.join(root, d)
440 440 if adddir(seen_dirs, fname):
441 441 if os.path.islink(fname):
442 442 for hgname in walkrepos(fname, True, seen_dirs):
443 443 yield hgname
444 444 else:
445 445 newdirs.append(d)
446 446 dirs[:] = newdirs
447 447
448 448
449 449 def binnode(ctx):
450 450 """Return binary node id for a given basectx"""
451 451 node = ctx.node()
452 452 if node is None:
453 453 return wdirid
454 454 return node
455 455
456 456
457 457 def intrev(ctx):
458 458 """Return integer for a given basectx that can be used in comparison or
459 459 arithmetic operation"""
460 460 rev = ctx.rev()
461 461 if rev is None:
462 462 return wdirrev
463 463 return rev
464 464
465 465
466 466 def formatchangeid(ctx):
467 467 """Format changectx as '{rev}:{node|formatnode}', which is the default
468 468 template provided by logcmdutil.changesettemplater"""
469 469 repo = ctx.repo()
470 470 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
471 471
472 472
473 473 def formatrevnode(ui, rev, node):
474 474 """Format given revision and node depending on the current verbosity"""
475 475 if ui.debugflag:
476 476 hexfunc = hex
477 477 else:
478 478 hexfunc = short
479 479 return b'%d:%s' % (rev, hexfunc(node))
480 480
481 481
482 482 def resolvehexnodeidprefix(repo, prefix):
483 483 if prefix.startswith(b'x'):
484 484 prefix = prefix[1:]
485 485 try:
486 486 # Uses unfiltered repo because it's faster when prefix is ambiguous.
487 487 # This matches the shortesthexnodeidprefix() function below.
488 488 node = repo.unfiltered().changelog._partialmatch(prefix)
489 489 except error.AmbiguousPrefixLookupError:
490 490 revset = repo.ui.config(
491 491 b'experimental', b'revisions.disambiguatewithin'
492 492 )
493 493 if revset:
494 494 # Clear config to avoid infinite recursion
495 495 configoverrides = {
496 496 (b'experimental', b'revisions.disambiguatewithin'): None
497 497 }
498 498 with repo.ui.configoverride(configoverrides):
499 499 revs = repo.anyrevs([revset], user=True)
500 500 matches = []
501 501 for rev in revs:
502 502 node = repo.changelog.node(rev)
503 503 if hex(node).startswith(prefix):
504 504 matches.append(node)
505 505 if len(matches) == 1:
506 506 return matches[0]
507 507 raise
508 508 if node is None:
509 509 return
510 510 repo.changelog.rev(node) # make sure node isn't filtered
511 511 return node
512 512
513 513
514 514 def mayberevnum(repo, prefix):
515 515 """Checks if the given prefix may be mistaken for a revision number"""
516 516 try:
517 517 i = int(prefix)
518 518 # if we are a pure int, then starting with zero will not be
519 519 # confused as a rev; or, obviously, if the int is larger
520 520 # than the value of the tip rev. We still need to disambiguate if
521 521 # prefix == '0', since that *is* a valid revnum.
522 522 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
523 523 return False
524 524 return True
525 525 except ValueError:
526 526 return False
527 527
528 528
529 529 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
530 530 """Find the shortest unambiguous prefix that matches hexnode.
531 531
532 532 If "cache" is not None, it must be a dictionary that can be used for
533 533 caching between calls to this method.
534 534 """
535 535 # _partialmatch() of filtered changelog could take O(len(repo)) time,
536 536 # which would be unacceptably slow. so we look for hash collision in
537 537 # unfiltered space, which means some hashes may be slightly longer.
538 538
539 539 minlength = max(minlength, 1)
540 540
541 541 def disambiguate(prefix):
542 542 """Disambiguate against revnums."""
543 543 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
544 544 if mayberevnum(repo, prefix):
545 545 return b'x' + prefix
546 546 else:
547 547 return prefix
548 548
549 549 hexnode = hex(node)
550 550 for length in range(len(prefix), len(hexnode) + 1):
551 551 prefix = hexnode[:length]
552 552 if not mayberevnum(repo, prefix):
553 553 return prefix
554 554
555 555 cl = repo.unfiltered().changelog
556 556 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
557 557 if revset:
558 558 revs = None
559 559 if cache is not None:
560 560 revs = cache.get(b'disambiguationrevset')
561 561 if revs is None:
562 562 revs = repo.anyrevs([revset], user=True)
563 563 if cache is not None:
564 564 cache[b'disambiguationrevset'] = revs
565 565 if cl.rev(node) in revs:
566 566 hexnode = hex(node)
567 567 nodetree = None
568 568 if cache is not None:
569 569 nodetree = cache.get(b'disambiguationnodetree')
570 570 if not nodetree:
571 571 if util.safehasattr(parsers, 'nodetree'):
572 572 # The CExt is the only implementation to provide a nodetree
573 573 # class so far.
574 574 index = cl.index
575 575 if util.safehasattr(index, 'get_cindex'):
576 576 # the rust wrapper needs to give access to its internal index
577 577 index = index.get_cindex()
578 578 nodetree = parsers.nodetree(index, len(revs))
579 579 for r in revs:
580 580 nodetree.insert(r)
581 581 if cache is not None:
582 582 cache[b'disambiguationnodetree'] = nodetree
583 583 if nodetree is not None:
584 584 length = max(nodetree.shortest(node), minlength)
585 585 prefix = hexnode[:length]
586 586 return disambiguate(prefix)
587 587 for length in range(minlength, len(hexnode) + 1):
588 588 matches = []
589 589 prefix = hexnode[:length]
590 590 for rev in revs:
591 591 otherhexnode = repo[rev].hex()
592 592 if prefix == otherhexnode[:length]:
593 593 matches.append(otherhexnode)
594 594 if len(matches) == 1:
595 595 return disambiguate(prefix)
596 596
597 597 try:
598 598 return disambiguate(cl.shortest(node, minlength))
599 599 except error.LookupError:
600 600 raise error.RepoLookupError()
601 601
602 602
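shortesthexnodeidprefix() must ensure the shortened hash cannot also be parsed as a revision number, which is what mayberevnum() above checks. A small sketch of that check, assuming a repo with at least 1235 revisions:

    from mercurial import scmutil

    scmutil.mayberevnum(repo, b'1234')    # True: plausible revnum, so ambiguous
    scmutil.mayberevnum(repo, b'01234')   # False: a leading zero never parses as a rev
    # With experimental.revisions.prefixhexnode enabled, ambiguous prefixes are
    # returned as b'x1234', and resolvehexnodeidprefix() strips the b'x' again.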
603 603 def isrevsymbol(repo, symbol):
604 604 """Checks if a symbol exists in the repo.
605 605
606 606 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
607 607 symbol is an ambiguous nodeid prefix.
608 608 """
609 609 try:
610 610 revsymbol(repo, symbol)
611 611 return True
612 612 except error.RepoLookupError:
613 613 return False
614 614
615 615
616 616 def revsymbol(repo, symbol):
617 617 """Returns a context given a single revision symbol (as string).
618 618
619 619 This is similar to revsingle(), but accepts only a single revision symbol,
620 620 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
621 621 not "max(public())".
622 622 """
623 623 if not isinstance(symbol, bytes):
624 624 msg = (
625 625 b"symbol (%s of type %s) was not a string, did you mean "
626 626 b"repo[symbol]?" % (symbol, type(symbol))
627 627 )
628 628 raise error.ProgrammingError(msg)
629 629 try:
630 630 if symbol in (b'.', b'tip', b'null'):
631 631 return repo[symbol]
632 632
633 633 try:
634 634 r = int(symbol)
635 635 if b'%d' % r != symbol:
636 636 raise ValueError
637 637 l = len(repo.changelog)
638 638 if r < 0:
639 639 r += l
640 640 if r < 0 or r >= l and r != wdirrev:
641 641 raise ValueError
642 642 return repo[r]
643 643 except error.FilteredIndexError:
644 644 raise
645 645 except (ValueError, OverflowError, IndexError):
646 646 pass
647 647
648 648 if len(symbol) == 40:
649 649 try:
650 650 node = bin(symbol)
651 651 rev = repo.changelog.rev(node)
652 652 return repo[rev]
653 653 except error.FilteredLookupError:
654 654 raise
655 655 except (TypeError, LookupError):
656 656 pass
657 657
658 658 # look up bookmarks through the name interface
659 659 try:
660 660 node = repo.names.singlenode(repo, symbol)
661 661 rev = repo.changelog.rev(node)
662 662 return repo[rev]
663 663 except KeyError:
664 664 pass
665 665
666 666 node = resolvehexnodeidprefix(repo, symbol)
667 667 if node is not None:
668 668 rev = repo.changelog.rev(node)
669 669 return repo[rev]
670 670
671 671 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
672 672
673 673 except error.WdirUnsupported:
674 674 return repo[None]
675 675 except (
676 676 error.FilteredIndexError,
677 677 error.FilteredLookupError,
678 678 error.FilteredRepoLookupError,
679 679 ):
680 680 raise _filterederror(repo, symbol)
681 681
682 682
683 683 def _filterederror(repo, changeid):
684 684 """build an exception to be raised about a filtered changeid
685 685
686 686 This is extracted in a function to help extensions (eg: evolve) to
687 687 experiment with various message variants."""
688 688 if repo.filtername.startswith(b'visible'):
689 689
690 690 # Check if the changeset is obsolete
691 691 unfilteredrepo = repo.unfiltered()
692 692 ctx = revsymbol(unfilteredrepo, changeid)
693 693
694 694 # If the changeset is obsolete, enrich the message with the reason
695 695 # that made this changeset not visible
696 696 if ctx.obsolete():
697 697 msg = obsutil._getfilteredreason(repo, changeid, ctx)
698 698 else:
699 699 msg = _(b"hidden revision '%s'") % changeid
700 700
701 701 hint = _(b'use --hidden to access hidden revisions')
702 702
703 703 return error.FilteredRepoLookupError(msg, hint=hint)
704 704 msg = _(b"filtered revision '%s' (not in '%s' subset)")
705 705 msg %= (changeid, repo.filtername)
706 706 return error.FilteredRepoLookupError(msg)
707 707
708 708
709 709 def revsingle(repo, revspec, default=b'.', localalias=None):
710 710 if not revspec and revspec != 0:
711 711 return repo[default]
712 712
713 713 l = revrange(repo, [revspec], localalias=localalias)
714 714 if not l:
715 715 raise error.Abort(_(b'empty revision set'))
716 716 return repo[l.last()]
717 717
718 718
719 719 def _pairspec(revspec):
720 720 tree = revsetlang.parse(revspec)
721 721 return tree and tree[0] in (
722 722 b'range',
723 723 b'rangepre',
724 724 b'rangepost',
725 725 b'rangeall',
726 726 )
727 727
728 728
729 729 def revpair(repo, revs):
730 730 if not revs:
731 731 return repo[b'.'], repo[None]
732 732
733 733 l = revrange(repo, revs)
734 734
735 735 if not l:
736 736 raise error.Abort(_(b'empty revision range'))
737 737
738 738 first = l.first()
739 739 second = l.last()
740 740
741 741 if (
742 742 first == second
743 743 and len(revs) >= 2
744 744 and not all(revrange(repo, [r]) for r in revs)
745 745 ):
746 746 raise error.Abort(_(b'empty revision on one side of range'))
747 747
748 748 # if top-level is range expression, the result must always be a pair
749 749 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
750 750 return repo[first], repo[None]
751 751
752 752 return repo[first], repo[second]
753 753
754 754
755 755 def revrange(repo, specs, localalias=None):
756 756 """Execute 1 to many revsets and return the union.
757 757
758 758 This is the preferred mechanism for executing revsets using user-specified
759 759 config options, such as revset aliases.
760 760
761 761 The revsets specified by ``specs`` will be executed via a chained ``OR``
762 762 expression. If ``specs`` is empty, an empty result is returned.
763 763
764 764 ``specs`` can contain integers, in which case they are assumed to be
765 765 revision numbers.
766 766
767 767 It is assumed the revsets are already formatted. If you have arguments
768 768 that need to be expanded in the revset, call ``revsetlang.formatspec()``
769 769 and pass the result as an element of ``specs``.
770 770
771 771 Specifying a single revset is allowed.
772 772
773 773 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
774 774 integer revisions.
775 775 """
776 776 allspecs = []
777 777 for spec in specs:
778 778 if isinstance(spec, int):
779 779 spec = revsetlang.formatspec(b'%d', spec)
780 780 allspecs.append(spec)
781 781 return repo.anyrevs(allspecs, user=True, localalias=localalias)
782 782
783 783
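revrange() ORs one or more revset specs together; plain integers are turned into b'%d' specs, and anything needing argument expansion should go through revsetlang.formatspec() first, as the docstring says. An illustrative sketch, assuming an open repo:

    from mercurial import revsetlang, scmutil

    spec = revsetlang.formatspec(b'branch(%s)', b'default')
    revs = scmutil.revrange(repo, [spec, 0])   # union of the revset and rev 0
    nodes = [repo[r].hex() for r in revs]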
784 784 def increasingwindows(windowsize=8, sizelimit=512):
785 785 while True:
786 786 yield windowsize
787 787 if windowsize < sizelimit:
788 788 windowsize *= 2
789 789
790 790
791 791 def walkchangerevs(repo, revs, makefilematcher, prepare):
792 792 '''Iterate over files and the revs in a "windowed" way.
793 793
794 794 Callers most commonly need to iterate backwards over the history
795 795 in which they are interested. Doing so has awful (quadratic-looking)
796 796 performance, so we use iterators in a "windowed" way.
797 797
798 798 We walk a window of revisions in the desired order. Within the
799 799 window, we first walk forwards to gather data, then in the desired
800 800 order (usually backwards) to display it.
801 801
802 802 This function returns an iterator yielding contexts. Before
803 803 yielding each context, the iterator will first call the prepare
804 804 function on each context in the window in forward order.'''
805 805
806 806 if not revs:
807 807 return []
808 808 change = repo.__getitem__
809 809
810 810 def iterate():
811 811 it = iter(revs)
812 812 stopiteration = False
813 813 for windowsize in increasingwindows():
814 814 nrevs = []
815 815 for i in pycompat.xrange(windowsize):
816 816 rev = next(it, None)
817 817 if rev is None:
818 818 stopiteration = True
819 819 break
820 820 nrevs.append(rev)
821 821 for rev in sorted(nrevs):
822 822 ctx = change(rev)
823 823 prepare(ctx, makefilematcher(ctx))
824 824 for rev in nrevs:
825 825 yield change(rev)
826 826
827 827 if stopiteration:
828 828 break
829 829
830 830 return iterate()
831 831
832 832
833 833 def meaningfulparents(repo, ctx):
834 834 """Return list of meaningful (or all if debug) parentrevs for rev.
835 835
836 836 For merges (two non-nullrev revisions) both parents are meaningful.
837 837 Otherwise the first parent revision is considered meaningful if it
838 838 is not the preceding revision.
839 839 """
840 840 parents = ctx.parents()
841 841 if len(parents) > 1:
842 842 return parents
843 843 if repo.ui.debugflag:
844 844 return [parents[0], repo[nullrev]]
845 845 if parents[0].rev() >= intrev(ctx) - 1:
846 846 return []
847 847 return parents
848 848
849 849
850 850 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
851 851 """Return a function that produces paths for presenting to the user.
852 852
853 853 The returned function takes a repo-relative path and produces a path
854 854 that can be presented in the UI.
855 855
856 856 Depending on the value of ui.relative-paths, either a repo-relative or
857 857 cwd-relative path will be produced.
858 858
859 859 legacyrelativevalue is the value to use if ui.relative-paths=legacy
860 860
861 861 If forcerelativevalue is not None, then that value will be used regardless
862 862 of what ui.relative-paths is set to.
863 863 """
864 864 if forcerelativevalue is not None:
865 865 relative = forcerelativevalue
866 866 else:
867 867 config = repo.ui.config(b'ui', b'relative-paths')
868 868 if config == b'legacy':
869 869 relative = legacyrelativevalue
870 870 else:
871 871 relative = stringutil.parsebool(config)
872 872 if relative is None:
873 873 raise error.ConfigError(
874 874 _(b"ui.relative-paths is not a boolean ('%s')") % config
875 875 )
876 876
877 877 if relative:
878 878 cwd = repo.getcwd()
879 879 if cwd != b'':
880 880 # this branch would work even if cwd == b'' (ie cwd = repo
881 881 # root), but its generality makes the returned function slower
882 882 pathto = repo.pathto
883 883 return lambda f: pathto(f, cwd)
884 884 if repo.ui.configbool(b'ui', b'slash'):
885 885 return lambda f: f
886 886 else:
887 887 return util.localpath
888 888
889 889
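getuipathfn() returns a callable that maps repo-relative paths to whatever should be shown to the user, either cwd-relative via repo.pathto() or repo-relative. Sketch, assuming the current directory is the subdirectory src/ of the repo:

    from mercurial import scmutil

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    uipathfn(b'src/main.py')   # -> b'main.py' when cwd-relative paths apply,
                               #    b'src/main.py' (or its localpath form) otherwise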
890 890 def subdiruipathfn(subpath, uipathfn):
891 891 '''Create a new uipathfn that treats the file as relative to subpath.'''
892 892 return lambda f: uipathfn(posixpath.join(subpath, f))
893 893
894 894
895 895 def anypats(pats, opts):
896 896 '''Checks if any patterns, including --include and --exclude were given.
897 897
898 898 Some commands (e.g. addremove) use this condition for deciding whether to
899 899 print absolute or relative paths.
900 900 '''
901 901 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
902 902
903 903
904 904 def expandpats(pats):
905 905 '''Expand bare globs when running on windows.
906 906 On posix we assume it has already been done by sh.'''
907 907 if not util.expandglobs:
908 908 return list(pats)
909 909 ret = []
910 910 for kindpat in pats:
911 911 kind, pat = matchmod._patsplit(kindpat, None)
912 912 if kind is None:
913 913 try:
914 914 globbed = glob.glob(pat)
915 915 except re.error:
916 916 globbed = [pat]
917 917 if globbed:
918 918 ret.extend(globbed)
919 919 continue
920 920 ret.append(kindpat)
921 921 return ret
922 922
923 923
924 924 def matchandpats(
925 925 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
926 926 ):
927 927 '''Return a matcher and the patterns that were used.
928 928 The matcher will warn about bad matches, unless an alternate badfn callback
929 929 is provided.'''
930 930 if opts is None:
931 931 opts = {}
932 932 if not globbed and default == b'relpath':
933 933 pats = expandpats(pats or [])
934 934
935 935 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
936 936
937 937 def bad(f, msg):
938 938 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
939 939
940 940 if badfn is None:
941 941 badfn = bad
942 942
943 943 m = ctx.match(
944 944 pats,
945 945 opts.get(b'include'),
946 946 opts.get(b'exclude'),
947 947 default,
948 948 listsubrepos=opts.get(b'subrepos'),
949 949 badfn=badfn,
950 950 )
951 951
952 952 if m.always():
953 953 pats = []
954 954 return m, pats
955 955
956 956
957 957 def match(
958 958 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
959 959 ):
960 960 '''Return a matcher that will warn about bad matches.'''
961 961 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
962 962
963 963
964 964 def matchall(repo):
965 965 '''Return a matcher that will efficiently match everything.'''
966 966 return matchmod.always()
967 967
968 968
969 969 def matchfiles(repo, files, badfn=None):
970 970 '''Return a matcher that will efficiently match exactly these files.'''
971 971 return matchmod.exact(files, badfn=badfn)
972 972
973 973
974 974 def parsefollowlinespattern(repo, rev, pat, msg):
975 975 """Return a file name from `pat` pattern suitable for usage in followlines
976 976 logic.
977 977 """
978 978 if not matchmod.patkind(pat):
979 979 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
980 980 else:
981 981 ctx = repo[rev]
982 982 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
983 983 files = [f for f in ctx if m(f)]
984 984 if len(files) != 1:
985 985 raise error.ParseError(msg)
986 986 return files[0]
987 987
988 988
989 989 def getorigvfs(ui, repo):
990 990 """return a vfs suitable to save 'orig' file
991 991
992 992 return None if no special directory is configured"""
993 993 origbackuppath = ui.config(b'ui', b'origbackuppath')
994 994 if not origbackuppath:
995 995 return None
996 996 return vfs.vfs(repo.wvfs.join(origbackuppath))
997 997
998 998
999 999 def backuppath(ui, repo, filepath):
1000 1000 '''customize where working copy backup files (.orig files) are created
1001 1001
1002 1002 Fetch user defined path from config file: [ui] origbackuppath = <path>
1003 1003 Fall back to default (filepath with .orig suffix) if not specified
1004 1004
1005 1005 filepath is repo-relative
1006 1006
1007 1007 Returns an absolute path
1008 1008 '''
1009 1009 origvfs = getorigvfs(ui, repo)
1010 1010 if origvfs is None:
1011 1011 return repo.wjoin(filepath + b".orig")
1012 1012
1013 1013 origbackupdir = origvfs.dirname(filepath)
1014 1014 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
1015 1015 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
1016 1016
1017 1017 # Remove any files that conflict with the backup file's path
1018 1018 for f in reversed(list(pathutil.finddirs(filepath))):
1019 1019 if origvfs.isfileorlink(f):
1020 1020 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1021 1021 origvfs.unlink(f)
1022 1022 break
1023 1023
1024 1024 origvfs.makedirs(origbackupdir)
1025 1025
1026 1026 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1027 1027 ui.note(
1028 1028 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1029 1029 )
1030 1030 origvfs.rmtree(filepath, forcibly=True)
1031 1031
1032 1032 return origvfs.join(filepath)
1033 1033
1034 1034
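backuppath() decides where the .orig backup of a working-copy file goes: next to the file by default, or under [ui] origbackuppath when that is configured (see getorigvfs() above). Sketch with a hypothetical repo rooted at /repo and no conflicting paths:

    from mercurial import scmutil

    scmutil.backuppath(ui, repo, b'dir/file.txt')
    # no origbackuppath configured      -> b'/repo/dir/file.txt.orig'
    # origbackuppath = .hg/origbackups  -> b'/repo/.hg/origbackups/dir/file.txt'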
1035 1035 class _containsnode(object):
1036 1036 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1037 1037
1038 1038 def __init__(self, repo, revcontainer):
1039 1039 self._torev = repo.changelog.rev
1040 1040 self._revcontains = revcontainer.__contains__
1041 1041
1042 1042 def __contains__(self, node):
1043 1043 return self._revcontains(self._torev(node))
1044 1044
1045 1045
1046 1046 def cleanupnodes(
1047 1047 repo,
1048 1048 replacements,
1049 1049 operation,
1050 1050 moves=None,
1051 1051 metadata=None,
1052 1052 fixphase=False,
1053 1053 targetphase=None,
1054 1054 backup=True,
1055 1055 ):
1056 1056 """do common cleanups when old nodes are replaced by new nodes
1057 1057
1058 1058 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1059 1059 (we might also want to move working directory parent in the future)
1060 1060
1061 1061 By default, bookmark moves are calculated automatically from 'replacements',
1062 1062 but 'moves' can be used to override that. Also, 'moves' may include
1063 1063 additional bookmark moves that should not have associated obsmarkers.
1064 1064
1065 1065 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1066 1066 have replacements. operation is a string, like "rebase".
1067 1067
1068 1068 metadata is a dictionary containing metadata to be stored in the obsmarker if
1069 1069 obsolescence is enabled.
1070 1070 """
1071 1071 assert fixphase or targetphase is None
1072 1072 if not replacements and not moves:
1073 1073 return
1074 1074
1075 1075 # translate mapping's other forms
1076 1076 if not util.safehasattr(replacements, b'items'):
1077 1077 replacements = {(n,): () for n in replacements}
1078 1078 else:
1079 1079 # upgrading non-tuple "source" keys to tuples for BC
1080 1080 repls = {}
1081 1081 for key, value in replacements.items():
1082 1082 if not isinstance(key, tuple):
1083 1083 key = (key,)
1084 1084 repls[key] = value
1085 1085 replacements = repls
1086 1086
1087 1087 # Unfiltered repo is needed since nodes in replacements might be hidden.
1088 1088 unfi = repo.unfiltered()
1089 1089
1090 1090 # Calculate bookmark movements
1091 1091 if moves is None:
1092 1092 moves = {}
1093 1093 for oldnodes, newnodes in replacements.items():
1094 1094 for oldnode in oldnodes:
1095 1095 if oldnode in moves:
1096 1096 continue
1097 1097 if len(newnodes) > 1:
1098 1098 # usually a split, take the one with the biggest rev number
1099 1099 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1100 1100 elif len(newnodes) == 0:
1101 1101 # move bookmark backwards
1102 1102 allreplaced = []
1103 1103 for rep in replacements:
1104 1104 allreplaced.extend(rep)
1105 1105 roots = list(
1106 1106 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1107 1107 )
1108 1108 if roots:
1109 1109 newnode = roots[0].node()
1110 1110 else:
1111 1111 newnode = nullid
1112 1112 else:
1113 1113 newnode = newnodes[0]
1114 1114 moves[oldnode] = newnode
1115 1115
1116 1116 allnewnodes = [n for ns in replacements.values() for n in ns]
1117 1117 toretract = {}
1118 1118 toadvance = {}
1119 1119 if fixphase:
1120 1120 precursors = {}
1121 1121 for oldnodes, newnodes in replacements.items():
1122 1122 for oldnode in oldnodes:
1123 1123 for newnode in newnodes:
1124 1124 precursors.setdefault(newnode, []).append(oldnode)
1125 1125
1126 1126 allnewnodes.sort(key=lambda n: unfi[n].rev())
1127 1127 newphases = {}
1128 1128
1129 1129 def phase(ctx):
1130 1130 return newphases.get(ctx.node(), ctx.phase())
1131 1131
1132 1132 for newnode in allnewnodes:
1133 1133 ctx = unfi[newnode]
1134 1134 parentphase = max(phase(p) for p in ctx.parents())
1135 1135 if targetphase is None:
1136 1136 oldphase = max(
1137 1137 unfi[oldnode].phase() for oldnode in precursors[newnode]
1138 1138 )
1139 1139 newphase = max(oldphase, parentphase)
1140 1140 else:
1141 1141 newphase = max(targetphase, parentphase)
1142 1142 newphases[newnode] = newphase
1143 1143 if newphase > ctx.phase():
1144 1144 toretract.setdefault(newphase, []).append(newnode)
1145 1145 elif newphase < ctx.phase():
1146 1146 toadvance.setdefault(newphase, []).append(newnode)
1147 1147
1148 1148 with repo.transaction(b'cleanup') as tr:
1149 1149 # Move bookmarks
1150 1150 bmarks = repo._bookmarks
1151 1151 bmarkchanges = []
1152 1152 for oldnode, newnode in moves.items():
1153 1153 oldbmarks = repo.nodebookmarks(oldnode)
1154 1154 if not oldbmarks:
1155 1155 continue
1156 1156 from . import bookmarks # avoid import cycle
1157 1157
1158 1158 repo.ui.debug(
1159 1159 b'moving bookmarks %r from %s to %s\n'
1160 1160 % (
1161 1161 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1162 1162 hex(oldnode),
1163 1163 hex(newnode),
1164 1164 )
1165 1165 )
1166 1166 # Delete divergent bookmarks being parents of related newnodes
1167 1167 deleterevs = repo.revs(
1168 1168 b'parents(roots(%ln & (::%n))) - parents(%n)',
1169 1169 allnewnodes,
1170 1170 newnode,
1171 1171 oldnode,
1172 1172 )
1173 1173 deletenodes = _containsnode(repo, deleterevs)
1174 1174 for name in oldbmarks:
1175 1175 bmarkchanges.append((name, newnode))
1176 1176 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1177 1177 bmarkchanges.append((b, None))
1178 1178
1179 1179 if bmarkchanges:
1180 1180 bmarks.applychanges(repo, tr, bmarkchanges)
1181 1181
1182 1182 for phase, nodes in toretract.items():
1183 1183 phases.retractboundary(repo, tr, phase, nodes)
1184 1184 for phase, nodes in toadvance.items():
1185 1185 phases.advanceboundary(repo, tr, phase, nodes)
1186 1186
1187 1187 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1188 1188 # Obsolete or strip nodes
1189 1189 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1190 1190 # If a node is already obsoleted, and we want to obsolete it
1191 1191 # without a successor, skip that obsolete request since it's
1192 1192 # unnecessary. That's the "if s or not isobs(n)" check below.
1193 1193 # Also sort the nodes in topological order; that might be useful for
1194 1194 # some obsstore logic.
1195 1195 # NOTE: the sorting might belong to createmarkers.
1196 1196 torev = unfi.changelog.rev
1197 1197 sortfunc = lambda ns: torev(ns[0][0])
1198 1198 rels = []
1199 1199 for ns, s in sorted(replacements.items(), key=sortfunc):
1200 1200 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1201 1201 rels.append(rel)
1202 1202 if rels:
1203 1203 obsolete.createmarkers(
1204 1204 repo, rels, operation=operation, metadata=metadata
1205 1205 )
1206 1206 elif phases.supportinternal(repo) and mayusearchived:
1207 1207 # this assumes we do not have "unstable" nodes above the cleaned ones
1208 1208 allreplaced = set()
1209 1209 for ns in replacements.keys():
1210 1210 allreplaced.update(ns)
1211 1211 if backup:
1212 1212 from . import repair # avoid import cycle
1213 1213
1214 1214 node = min(allreplaced, key=repo.changelog.rev)
1215 1215 repair.backupbundle(
1216 1216 repo, allreplaced, allreplaced, node, operation
1217 1217 )
1218 1218 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1219 1219 else:
1220 1220 from . import repair # avoid import cycle
1221 1221
1222 1222 tostrip = list(n for ns in replacements for n in ns)
1223 1223 if tostrip:
1224 1224 repair.delayedstrip(
1225 1225 repo.ui, repo, tostrip, operation, backup=backup
1226 1226 )
1227 1227
1228 1228
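cleanupnodes() accepts `replacements` either as a plain iterable of nodes (meaning: no successors) or as a mapping; bare keys are upgraded to 1-tuples, so the canonical shape is {(oldnode, ...): (newnode, ...)}. Sketch with hypothetical oldnode/newnode binary node ids:

    from mercurial import scmutil

    # nodes removed without successors (e.g. a prune-like operation):
    scmutil.cleanupnodes(repo, [oldnode], b'my-prune')
    # explicit successors; the bare key becomes (oldnode,) internally:
    scmutil.cleanupnodes(repo, {oldnode: (newnode,)}, b'rebase')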
1229 1229 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1230 1230 if opts is None:
1231 1231 opts = {}
1232 1232 m = matcher
1233 1233 dry_run = opts.get(b'dry_run')
1234 1234 try:
1235 1235 similarity = float(opts.get(b'similarity') or 0)
1236 1236 except ValueError:
1237 1237 raise error.Abort(_(b'similarity must be a number'))
1238 1238 if similarity < 0 or similarity > 100:
1239 1239 raise error.Abort(_(b'similarity must be between 0 and 100'))
1240 1240 similarity /= 100.0
1241 1241
1242 1242 ret = 0
1243 1243
1244 1244 wctx = repo[None]
1245 1245 for subpath in sorted(wctx.substate):
1246 1246 submatch = matchmod.subdirmatcher(subpath, m)
1247 1247 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1248 1248 sub = wctx.sub(subpath)
1249 1249 subprefix = repo.wvfs.reljoin(prefix, subpath)
1250 1250 subuipathfn = subdiruipathfn(subpath, uipathfn)
1251 1251 try:
1252 1252 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1253 1253 ret = 1
1254 1254 except error.LookupError:
1255 1255 repo.ui.status(
1256 1256 _(b"skipping missing subrepository: %s\n")
1257 1257 % uipathfn(subpath)
1258 1258 )
1259 1259
1260 1260 rejected = []
1261 1261
1262 1262 def badfn(f, msg):
1263 1263 if f in m.files():
1264 1264 m.bad(f, msg)
1265 1265 rejected.append(f)
1266 1266
1267 1267 badmatch = matchmod.badmatch(m, badfn)
1268 1268 added, unknown, deleted, removed, forgotten = _interestingfiles(
1269 1269 repo, badmatch
1270 1270 )
1271 1271
1272 1272 unknownset = set(unknown + forgotten)
1273 1273 toprint = unknownset.copy()
1274 1274 toprint.update(deleted)
1275 1275 for abs in sorted(toprint):
1276 1276 if repo.ui.verbose or not m.exact(abs):
1277 1277 if abs in unknownset:
1278 1278 status = _(b'adding %s\n') % uipathfn(abs)
1279 1279 label = b'ui.addremove.added'
1280 1280 else:
1281 1281 status = _(b'removing %s\n') % uipathfn(abs)
1282 1282 label = b'ui.addremove.removed'
1283 1283 repo.ui.status(status, label=label)
1284 1284
1285 1285 renames = _findrenames(
1286 1286 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1287 1287 )
1288 1288
1289 1289 if not dry_run:
1290 1290 _markchanges(repo, unknown + forgotten, deleted, renames)
1291 1291
1292 1292 for f in rejected:
1293 1293 if f in m.files():
1294 1294 return 1
1295 1295 return ret
1296 1296
1297 1297
1298 1298 def marktouched(repo, files, similarity=0.0):
1299 1299 '''Assert that files have somehow been operated upon. files are relative to
1300 1300 the repo root.'''
1301 1301 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1302 1302 rejected = []
1303 1303
1304 1304 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1305 1305
1306 1306 if repo.ui.verbose:
1307 1307 unknownset = set(unknown + forgotten)
1308 1308 toprint = unknownset.copy()
1309 1309 toprint.update(deleted)
1310 1310 for abs in sorted(toprint):
1311 1311 if abs in unknownset:
1312 1312 status = _(b'adding %s\n') % abs
1313 1313 else:
1314 1314 status = _(b'removing %s\n') % abs
1315 1315 repo.ui.status(status)
1316 1316
1317 1317 # TODO: We should probably have the caller pass in uipathfn and apply it to
1318 1318 # the messages above too. legacyrelativevalue=True is consistent with how
1319 1319 # it used to work.
1320 1320 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1321 1321 renames = _findrenames(
1322 1322 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1323 1323 )
1324 1324
1325 1325 _markchanges(repo, unknown + forgotten, deleted, renames)
1326 1326
1327 1327 for f in rejected:
1328 1328 if f in m.files():
1329 1329 return 1
1330 1330 return 0
1331 1331
1332 1332
1333 1333 def _interestingfiles(repo, matcher):
1334 1334 '''Walk dirstate with matcher, looking for files that addremove would care
1335 1335 about.
1336 1336
1337 1337 This is different from dirstate.status because it doesn't care about
1338 1338 whether files are modified or clean.'''
1339 1339 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1340 1340 audit_path = pathutil.pathauditor(repo.root, cached=True)
1341 1341
1342 1342 ctx = repo[None]
1343 1343 dirstate = repo.dirstate
1344 1344 matcher = repo.narrowmatch(matcher, includeexact=True)
1345 1345 walkresults = dirstate.walk(
1346 1346 matcher,
1347 1347 subrepos=sorted(ctx.substate),
1348 1348 unknown=True,
1349 1349 ignored=False,
1350 1350 full=False,
1351 1351 )
1352 1352 for abs, st in pycompat.iteritems(walkresults):
1353 1353 dstate = dirstate[abs]
1354 1354 if dstate == b'?' and audit_path.check(abs):
1355 1355 unknown.append(abs)
1356 1356 elif dstate != b'r' and not st:
1357 1357 deleted.append(abs)
1358 1358 elif dstate == b'r' and st:
1359 1359 forgotten.append(abs)
1360 1360 # for finding renames
1361 1361 elif dstate == b'r' and not st:
1362 1362 removed.append(abs)
1363 1363 elif dstate == b'a':
1364 1364 added.append(abs)
1365 1365
1366 1366 return added, unknown, deleted, removed, forgotten
1367 1367
1368 1368
1369 1369 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1370 1370 '''Find renames from removed files to added ones.'''
1371 1371 renames = {}
1372 1372 if similarity > 0:
1373 1373 for old, new, score in similar.findrenames(
1374 1374 repo, added, removed, similarity
1375 1375 ):
1376 1376 if (
1377 1377 repo.ui.verbose
1378 1378 or not matcher.exact(old)
1379 1379 or not matcher.exact(new)
1380 1380 ):
1381 1381 repo.ui.status(
1382 1382 _(
1383 1383 b'recording removal of %s as rename to %s '
1384 1384 b'(%d%% similar)\n'
1385 1385 )
1386 1386 % (uipathfn(old), uipathfn(new), score * 100)
1387 1387 )
1388 1388 renames[new] = old
1389 1389 return renames
1390 1390
1391 1391
1392 1392 def _markchanges(repo, unknown, deleted, renames):
1393 1393 '''Marks the files in unknown as added, the files in deleted as removed,
1394 1394 and the files in renames as copied.'''
1395 1395 wctx = repo[None]
1396 1396 with repo.wlock():
1397 1397 wctx.forget(deleted)
1398 1398 wctx.add(unknown)
1399 1399 for new, old in pycompat.iteritems(renames):
1400 1400 wctx.copy(old, new)
1401 1401
1402 1402
1403 1403 def getrenamedfn(repo, endrev=None):
1404 1404 if copiesmod.usechangesetcentricalgo(repo):
1405 1405
1406 1406 def getrenamed(fn, rev):
1407 1407 ctx = repo[rev]
1408 1408 p1copies = ctx.p1copies()
1409 1409 if fn in p1copies:
1410 1410 return p1copies[fn]
1411 1411 p2copies = ctx.p2copies()
1412 1412 if fn in p2copies:
1413 1413 return p2copies[fn]
1414 1414 return None
1415 1415
1416 1416 return getrenamed
1417 1417
1418 1418 rcache = {}
1419 1419 if endrev is None:
1420 1420 endrev = len(repo)
1421 1421
1422 1422 def getrenamed(fn, rev):
1423 1423 '''looks up all renames for a file (up to endrev) the first
1424 1424 time the file is given. It indexes on the changerev and only
1425 1425 parses the manifest if linkrev != changerev.
1426 1426 Returns rename info for fn at changerev rev.'''
1427 1427 if fn not in rcache:
1428 1428 rcache[fn] = {}
1429 1429 fl = repo.file(fn)
1430 1430 for i in fl:
1431 1431 lr = fl.linkrev(i)
1432 1432 renamed = fl.renamed(fl.node(i))
1433 1433 rcache[fn][lr] = renamed and renamed[0]
1434 1434 if lr >= endrev:
1435 1435 break
1436 1436 if rev in rcache[fn]:
1437 1437 return rcache[fn][rev]
1438 1438
1439 1439 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1440 1440 # filectx logic.
1441 1441 try:
1442 1442 return repo[rev][fn].copysource()
1443 1443 except error.LookupError:
1444 1444 return None
1445 1445
1446 1446 return getrenamed
1447 1447
1448 1448
1449 1449 def getcopiesfn(repo, endrev=None):
1450 1450 if copiesmod.usechangesetcentricalgo(repo):
1451 1451
1452 1452 def copiesfn(ctx):
1453 1453 if ctx.p2copies():
1454 1454 allcopies = ctx.p1copies().copy()
1455 1455 # There should be no overlap
1456 1456 allcopies.update(ctx.p2copies())
1457 1457 return sorted(allcopies.items())
1458 1458 else:
1459 1459 return sorted(ctx.p1copies().items())
1460 1460
1461 1461 else:
1462 1462 getrenamed = getrenamedfn(repo, endrev)
1463 1463
1464 1464 def copiesfn(ctx):
1465 1465 copies = []
1466 1466 for fn in ctx.files():
1467 1467 rename = getrenamed(fn, ctx.rev())
1468 1468 if rename:
1469 1469 copies.append((fn, rename))
1470 1470 return copies
1471 1471
1472 1472 return copiesfn
1473 1473
1474 1474
1475 1475 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1476 1476 """Update the dirstate to reflect the intent of copying src to dst. For
1477 1477 different reasons it might not end with dst being marked as copied from src.
1478 1478 """
1479 1479 origsrc = repo.dirstate.copied(src) or src
1480 1480 if dst == origsrc: # copying back a copy?
1481 1481 if repo.dirstate[dst] not in b'mn' and not dryrun:
1482 1482 repo.dirstate.normallookup(dst)
1483 1483 else:
1484 1484 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1485 1485 if not ui.quiet:
1486 1486 ui.warn(
1487 1487 _(
1488 1488 b"%s has not been committed yet, so no copy "
1489 1489 b"data will be stored for %s.\n"
1490 1490 )
1491 1491 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1492 1492 )
1493 1493 if repo.dirstate[dst] in b'?r' and not dryrun:
1494 1494 wctx.add([dst])
1495 1495 elif not dryrun:
1496 1496 wctx.copy(origsrc, dst)
1497 1497
1498 1498
1499 1499 def movedirstate(repo, newctx, match=None):
1500 1500 """Move the dirstate to newctx and adjust it as necessary.
1501 1501
1502 1502 A matcher can be provided as an optimization. It is probably a bug to pass
1503 1503 a matcher that doesn't match all the differences between the parent of the
1504 1504 working copy and newctx.
1505 1505 """
1506 1506 oldctx = repo[b'.']
1507 1507 ds = repo.dirstate
1508 1508 copies = dict(ds.copies())
1509 1509 ds.setparents(newctx.node(), nullid)
1510 1510 s = newctx.status(oldctx, match=match)
1511 1511 for f in s.modified:
1512 1512 if ds[f] == b'r':
1513 1513 # modified + removed -> removed
1514 1514 continue
1515 1515 ds.normallookup(f)
1516 1516
1517 1517 for f in s.added:
1518 1518 if ds[f] == b'r':
1519 1519 # added + removed -> unknown
1520 1520 ds.drop(f)
1521 1521 elif ds[f] != b'a':
1522 1522 ds.add(f)
1523 1523
1524 1524 for f in s.removed:
1525 1525 if ds[f] == b'a':
1526 1526 # removed + added -> normal
1527 1527 ds.normallookup(f)
1528 1528 elif ds[f] != b'r':
1529 1529 ds.remove(f)
1530 1530
1531 1531 # Merge old parent and old working dir copies
1532 1532 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1533 1533 oldcopies.update(copies)
1534 1534 copies = {
1535 1535 dst: oldcopies.get(src, src)
1536 1536 for dst, src in pycompat.iteritems(oldcopies)
1537 1537 }
1538 1538 # Adjust the dirstate copies
1539 1539 for dst, src in pycompat.iteritems(copies):
1540 1540 if src not in newctx or dst in newctx or ds[dst] != b'a':
1541 1541 src = None
1542 1542 ds.copy(src, dst)
1543 1543 repo._quick_access_changeid_invalidate()
1544 1544
1545 1545
1546 1546 def filterrequirements(requirements):
1547 1547 """ filters the requirements into two sets:
1548 1548
1549 1549 wcreq: requirements which should be written in .hg/requires
1550 1550 storereq: which should be written in .hg/store/requires
1551 1551
1552 1552 Returns (wcreq, storereq)
1553 1553 """
1554 1554 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1555 1555 wc, store = set(), set()
1556 1556 for r in requirements:
1557 1557 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1558 1558 wc.add(r)
1559 1559 else:
1560 1560 store.add(r)
1561 1561 return wc, store
1562 1562 return requirements, None
1563 1563
1564 1564
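filterrequirements() only splits the requirement set when the share-safe requirement is present; otherwise everything stays in .hg/requires. A small sketch (the constant comes from the requirements module imported above; b'store' and b'revlogv1' are ordinary requirement strings used here for illustration):

    from mercurial import requirements as requirementsmod
    from mercurial import scmutil

    reqs = {requirementsmod.SHARESAFE_REQUIREMENT, b'store', b'revlogv1'}
    wcreq, storereq = scmutil.filterrequirements(reqs)
    # wcreq:    entries listed in WORKING_DIR_REQUIREMENTS -> .hg/requires
    # storereq: everything else                            -> .hg/store/requires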
1565 1565 def istreemanifest(repo):
1566 1566 """ returns whether the repository is using treemanifest or not """
1567 1567 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1568 1568
1569 1569
1570 1570 def writereporequirements(repo, requirements=None):
1571 1571 """ writes requirements for the repo to .hg/requires """
1572 1572 if requirements:
1573 1573 repo.requirements = requirements
1574 1574 wcreq, storereq = filterrequirements(repo.requirements)
1575 1575 if wcreq is not None:
1576 1576 writerequires(repo.vfs, wcreq)
1577 1577 if storereq is not None:
1578 1578 writerequires(repo.svfs, storereq)
1579 1579
1580 1580
1581 1581 def writerequires(opener, requirements):
1582 1582 with opener(b'requires', b'w', atomictemp=True) as fp:
1583 1583 for r in sorted(requirements):
1584 1584 fp.write(b"%s\n" % r)
1585 1585
1586 1586
1587 1587 class filecachesubentry(object):
1588 1588 def __init__(self, path, stat):
1589 1589 self.path = path
1590 1590 self.cachestat = None
1591 1591 self._cacheable = None
1592 1592
1593 1593 if stat:
1594 1594 self.cachestat = filecachesubentry.stat(self.path)
1595 1595
1596 1596 if self.cachestat:
1597 1597 self._cacheable = self.cachestat.cacheable()
1598 1598 else:
1599 1599 # None means we don't know yet
1600 1600 self._cacheable = None
1601 1601
1602 1602 def refresh(self):
1603 1603 if self.cacheable():
1604 1604 self.cachestat = filecachesubentry.stat(self.path)
1605 1605
1606 1606 def cacheable(self):
1607 1607 if self._cacheable is not None:
1608 1608 return self._cacheable
1609 1609
1610 1610 # we don't know yet, assume it is for now
1611 1611 return True
1612 1612
1613 1613 def changed(self):
1614 1614 # no point in going further if we can't cache it
1615 1615 if not self.cacheable():
1616 1616 return True
1617 1617
1618 1618 newstat = filecachesubentry.stat(self.path)
1619 1619
1620 1620 # we may not know if it's cacheable yet, check again now
1621 1621 if newstat and self._cacheable is None:
1622 1622 self._cacheable = newstat.cacheable()
1623 1623
1624 1624 # check again
1625 1625 if not self._cacheable:
1626 1626 return True
1627 1627
1628 1628 if self.cachestat != newstat:
1629 1629 self.cachestat = newstat
1630 1630 return True
1631 1631 else:
1632 1632 return False
1633 1633
1634 1634 @staticmethod
1635 1635 def stat(path):
1636 1636 try:
1637 1637 return util.cachestat(path)
1638 1638 except OSError as e:
1639 1639 if e.errno != errno.ENOENT:
1640 1640 raise
1641 1641
1642 1642
1643 1643 class filecacheentry(object):
1644 1644 def __init__(self, paths, stat=True):
1645 1645 self._entries = []
1646 1646 for path in paths:
1647 1647 self._entries.append(filecachesubentry(path, stat))
1648 1648
1649 1649 def changed(self):
1650 1650 '''true if any entry has changed'''
1651 1651 for entry in self._entries:
1652 1652 if entry.changed():
1653 1653 return True
1654 1654 return False
1655 1655
1656 1656 def refresh(self):
1657 1657 for entry in self._entries:
1658 1658 entry.refresh()
1659 1659
1660 1660
1661 1661 class filecache(object):
1662 1662 """A property like decorator that tracks files under .hg/ for updates.
1663 1663
1664 1664 On first access, the files defined as arguments are stat()ed and the
1665 1665 results cached. The decorated function is called. The results are stashed
1666 1666 away in a ``_filecache`` dict on the object whose method is decorated.
1667 1667
1668 1668 On subsequent access, the cached result is used as it is set to the
1669 1669 instance dictionary.
1670 1670
1671 1671 On external property set/delete operations, the caller must update the
1672 1672 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1673 1673 instead of directly setting <attr>.
1674 1674
1675 1675 When using the property API, the cached data is always used if available.
1676 1676 No stat() is performed to check if the file has changed.
1677 1677
1678 1678 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1679 1679 can populate an entry before the property's getter is called. In this case,
1680 1680 entries in ``_filecache`` will be used during property operations,
1681 1681 if available. If the underlying file changes, it is up to external callers
1682 1682 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1683 1683 method result as well as possibly calling ``del obj._filecache[attr]`` to
1684 1684 remove the ``filecacheentry``.
1685 1685 """
1686 1686
1687 1687 def __init__(self, *paths):
1688 1688 self.paths = paths
1689 1689
1690 1690 def join(self, obj, fname):
1691 1691 """Used to compute the runtime path of a cached file.
1692 1692
1693 1693 Users should subclass filecache and provide their own version of this
1694 1694 function to call the appropriate join function on 'obj' (an instance
1695 1695 of the class whose member function was decorated).
1696 1696 """
1697 1697 raise NotImplementedError
1698 1698
1699 1699 def __call__(self, func):
1700 1700 self.func = func
1701 1701 self.sname = func.__name__
1702 1702 self.name = pycompat.sysbytes(self.sname)
1703 1703 return self
1704 1704
1705 1705 def __get__(self, obj, type=None):
1706 1706 # if accessed on the class, return the descriptor itself.
1707 1707 if obj is None:
1708 1708 return self
1709 1709
1710 1710 assert self.sname not in obj.__dict__
1711 1711
1712 1712 entry = obj._filecache.get(self.name)
1713 1713
1714 1714 if entry:
1715 1715 if entry.changed():
1716 1716 entry.obj = self.func(obj)
1717 1717 else:
1718 1718 paths = [self.join(obj, path) for path in self.paths]
1719 1719
1720 1720 # We stat -before- creating the object so our cache doesn't lie if
1721 1721 # a writer modifies the file between the time we read it and stat it
1722 1722 entry = filecacheentry(paths, True)
1723 1723 entry.obj = self.func(obj)
1724 1724
1725 1725 obj._filecache[self.name] = entry
1726 1726
1727 1727 obj.__dict__[self.sname] = entry.obj
1728 1728 return entry.obj
1729 1729
1730 1730 # don't implement __set__(), which would make __dict__ lookup as slow as
1731 1731 # function call.
1732 1732
1733 1733 def set(self, obj, value):
1734 1734 if self.name not in obj._filecache:
1735 1735 # we add an entry for the missing value because X in __dict__
1736 1736 # implies X in _filecache
1737 1737 paths = [self.join(obj, path) for path in self.paths]
1738 1738 ce = filecacheentry(paths, False)
1739 1739 obj._filecache[self.name] = ce
1740 1740 else:
1741 1741 ce = obj._filecache[self.name]
1742 1742
1743 1743 ce.obj = value # update cached copy
1744 1744 obj.__dict__[self.sname] = value # update copy returned by obj.x
1745 1745
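For intuition, here is a heavily reduced standalone descriptor using the same stat-and-cache idea as filecache: recompute a value only when the backing file's stat signature changes. It is only a sketch, not Mercurial's implementation; the `root` attribute on the owning object is an assumption standing in for the repository's `join()`.

```python
import os


class statcachedproperty(object):
    """Reduced sketch of the stat-and-cache pattern used by filecache."""

    def __init__(self, path):
        self.path = path

    def __call__(self, func):
        self.func = func
        return self

    def _signature(self, obj):
        # (size, mtime) is a cheap stand-in for util.cachestat
        try:
            st = os.stat(os.path.join(obj.root, self.path))
            return (st.st_size, int(st.st_mtime))
        except OSError:
            return None

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        cache = obj.__dict__.setdefault('_statcache', {})
        sig = self._signature(obj)
        cached = cache.get(self.path)
        if cached is None or cached[0] != sig:
            cache[self.path] = (sig, self.func(obj))
        return cache[self.path][1]
```

The real filecache additionally stashes the computed value in the instance ``__dict__`` so later reads bypass the descriptor entirely, which is why external setters must go through ``__class__.<attr>.set()``.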
1746 1746
1747 1747 def extdatasource(repo, source):
1748 1748 """Gather a map of rev -> value dict from the specified source
1749 1749
1750 1750 A source spec is treated as a URL, with a special case shell: type
1751 1751 for parsing the output from a shell command.
1752 1752
1753 1753 The data is parsed as a series of newline-separated records where
1754 1754 each record is a revision specifier optionally followed by a space
1755 1755 and a freeform string value. If the revision is known locally, it
1756 1756 is converted to a rev, otherwise the record is skipped.
1757 1757
1758 1758 Note that both key and value are treated as UTF-8 and converted to
1759 1759 the local encoding. This allows uniformity between local and
1760 1760 remote data sources.
1761 1761 """
1762 1762
1763 1763 spec = repo.ui.config(b"extdata", source)
1764 1764 if not spec:
1765 1765 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1766 1766
1767 1767 data = {}
1768 1768 src = proc = None
1769 1769 try:
1770 1770 if spec.startswith(b"shell:"):
1771 1771 # external commands should be run relative to the repo root
1772 1772 cmd = spec[6:]
1773 1773 proc = subprocess.Popen(
1774 1774 procutil.tonativestr(cmd),
1775 1775 shell=True,
1776 1776 bufsize=-1,
1777 1777 close_fds=procutil.closefds,
1778 1778 stdout=subprocess.PIPE,
1779 1779 cwd=procutil.tonativestr(repo.root),
1780 1780 )
1781 1781 src = proc.stdout
1782 1782 else:
1783 1783 # treat as a URL or file
1784 1784 src = url.open(repo.ui, spec)
1785 1785 for l in src:
1786 1786 if b" " in l:
1787 1787 k, v = l.strip().split(b" ", 1)
1788 1788 else:
1789 1789 k, v = l.strip(), b""
1790 1790
1791 1791 k = encoding.tolocal(k)
1792 1792 try:
1793 1793 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1794 1794 except (error.LookupError, error.RepoLookupError):
1795 1795 pass # we ignore data for nodes that don't exist locally
1796 1796 finally:
1797 1797 if proc:
1798 1798 try:
1799 1799 proc.communicate()
1800 1800 except ValueError:
1801 1801 # This happens if we started iterating src and then
1802 1802 # get a parse error on a line. It should be safe to ignore.
1803 1803 pass
1804 1804 if src:
1805 1805 src.close()
1806 1806 if proc and proc.returncode != 0:
1807 1807 raise error.Abort(
1808 1808 _(b"extdata command '%s' failed: %s")
1809 1809 % (cmd, procutil.explainexit(proc.returncode))
1810 1810 )
1811 1811
1812 1812 return data
1813 1813
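The record format described in the docstring above is simple enough to show in isolation. The toy parser below only handles the line splitting; the real function additionally resolves each key through revsingle() and converts both key and value with encoding.tolocal().

```python
# Toy parser for the extdata record format: "<revspec>[ <freeform value>]".
def parse_extdata_lines(lines):
    data = {}
    for l in lines:
        l = l.strip()
        if not l:
            continue
        if b" " in l:
            k, v = l.split(b" ", 1)
        else:
            k, v = l, b""
        data[k] = v
    return data

print(parse_extdata_lines([b"3de5eca88c00 approved by alice\n", b"badc0ffee\n"]))
# {b'3de5eca88c00': b'approved by alice', b'badc0ffee': b''}
```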
1814 1814
1815 1815 class progress(object):
1816 1816 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1817 1817 self.ui = ui
1818 1818 self.pos = 0
1819 1819 self.topic = topic
1820 1820 self.unit = unit
1821 1821 self.total = total
1822 1822 self.debug = ui.configbool(b'progress', b'debug')
1823 1823 self._updatebar = updatebar
1824 1824
1825 1825 def __enter__(self):
1826 1826 return self
1827 1827
1828 1828 def __exit__(self, exc_type, exc_value, exc_tb):
1829 1829 self.complete()
1830 1830
1831 1831 def update(self, pos, item=b"", total=None):
1832 1832 assert pos is not None
1833 1833 if total:
1834 1834 self.total = total
1835 1835 self.pos = pos
1836 1836 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1837 1837 if self.debug:
1838 1838 self._printdebug(item)
1839 1839
1840 1840 def increment(self, step=1, item=b"", total=None):
1841 1841 self.update(self.pos + step, item, total)
1842 1842
1843 1843 def complete(self):
1844 1844 self.pos = None
1845 1845 self.unit = b""
1846 1846 self.total = None
1847 1847 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1848 1848
1849 1849 def _printdebug(self, item):
1850 1850 unit = b''
1851 1851 if self.unit:
1852 1852 unit = b' ' + self.unit
1853 1853 if item:
1854 1854 item = b' ' + item
1855 1855
1856 1856 if self.total:
1857 1857 pct = 100.0 * self.pos / self.total
1858 1858 self.ui.debug(
1859 1859 b'%s:%s %d/%d%s (%4.2f%%)\n'
1860 1860 % (self.topic, item, self.pos, self.total, unit, pct)
1861 1861 )
1862 1862 else:
1863 1863 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1864 1864
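A hedged usage sketch of the progress helper defined above, assuming the `progress` class is in scope (it lives in mercurial.scmutil). Callers normally obtain one from the ui layer; the stub ui and print-based bar below are stand-ins for illustration only.

```python
# Usage sketch for the progress class above, with stand-ins for the ui layer.
class stubui(object):
    def configbool(self, section, name):
        return False  # no 'progress.debug' output

def printbar(topic, pos, item, unit, total):
    print(topic, pos, item, unit, total)

with progress(stubui(), printbar, b'copying', unit=b'files', total=3) as p:
    for i, name in enumerate([b'a', b'b', b'c']):
        p.update(i + 1, item=name)
# __exit__ calls complete(), which reports pos=None so the bar can be cleared
```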
1865 1865
1866 1866 def gdinitconfig(ui):
1867 1867 """helper function to know if a repo should be created as general delta
1868 1868 """
1869 1869 # experimental config: format.generaldelta
1870 1870 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1871 1871 b'format', b'usegeneraldelta'
1872 1872 )
1873 1873
1874 1874
1875 1875 def gddeltaconfig(ui):
1876 1876 """helper function to know if incoming delta should be optimised
1877 1877 """
1878 1878 # experimental config: format.generaldelta
1879 1879 return ui.configbool(b'format', b'generaldelta')
1880 1880
1881 1881
1882 1882 class simplekeyvaluefile(object):
1883 1883 """A simple file with key=value lines
1884 1884
1885 1885 Keys must be alphanumerics and start with a letter, values must not
1886 1886 contain '\n' characters"""
1887 1887
1888 1888 firstlinekey = b'__firstline'
1889 1889
1890 1890 def __init__(self, vfs, path, keys=None):
1891 1891 self.vfs = vfs
1892 1892 self.path = path
1893 1893
1894 1894 def read(self, firstlinenonkeyval=False):
1895 1895 """Read the contents of a simple key-value file
1896 1896
1897 1897 'firstlinenonkeyval' indicates whether the first line of the file should
1898 1898 be treated as a key-value pair or returned fully under the
1899 1899 __firstline key."""
1900 1900 lines = self.vfs.readlines(self.path)
1901 1901 d = {}
1902 1902 if firstlinenonkeyval:
1903 1903 if not lines:
1904 1904 e = _(b"empty simplekeyvalue file")
1905 1905 raise error.CorruptedState(e)
1906 1906 # we don't want to include '\n' in the __firstline
1907 1907 d[self.firstlinekey] = lines[0][:-1]
1908 1908 del lines[0]
1909 1909
1910 1910 try:
1911 1911 # the 'if line.strip()' part prevents us from failing on empty
1912 1912 # lines which contain only '\n' and are therefore not skipped
1913 1913 # by 'if line'
1914 1914 updatedict = dict(
1915 1915 line[:-1].split(b'=', 1) for line in lines if line.strip()
1916 1916 )
1917 1917 if self.firstlinekey in updatedict:
1918 1918 e = _(b"%r can't be used as a key")
1919 1919 raise error.CorruptedState(e % self.firstlinekey)
1920 1920 d.update(updatedict)
1921 1921 except ValueError as e:
1922 1922 raise error.CorruptedState(stringutil.forcebytestr(e))
1923 1923 return d
1924 1924
1925 1925 def write(self, data, firstline=None):
1926 1926 """Write key=>value mapping to a file
1927 1927 data is a dict. Keys must be alphanumerical and start with a letter.
1928 1928 Values must not contain newline characters.
1929 1929
1930 1930 If 'firstline' is not None, it is written to file before
1931 1931 everything else, as it is, not in a key=value form"""
1932 1932 lines = []
1933 1933 if firstline is not None:
1934 1934 lines.append(b'%s\n' % firstline)
1935 1935
1936 1936 for k, v in data.items():
1937 1937 if k == self.firstlinekey:
1938 1938 e = b"key name '%s' is reserved" % self.firstlinekey
1939 1939 raise error.ProgrammingError(e)
1940 1940 if not k[0:1].isalpha():
1941 1941 e = b"keys must start with a letter in a key-value file"
1942 1942 raise error.ProgrammingError(e)
1943 1943 if not k.isalnum():
1944 1944 e = b"invalid key name in a simple key-value file"
1945 1945 raise error.ProgrammingError(e)
1946 1946 if b'\n' in v:
1947 1947 e = b"invalid value in a simple key-value file"
1948 1948 raise error.ProgrammingError(e)
1949 1949 lines.append(b"%s=%s\n" % (k, v))
1950 1950 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1951 1951 fp.write(b''.join(lines))
1952 1952
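To make the on-disk format concrete, here is a standalone round-trip of the same key=value layout, without the vfs, the key validation, or the atomic write that the class above performs.

```python
# Standalone round-trip of the simple key-value format (no vfs, no validation).
def encode_kv(data, firstline=None):
    lines = []
    if firstline is not None:
        lines.append(b'%s\n' % firstline)
    for k, v in sorted(data.items()):
        lines.append(b'%s=%s\n' % (k, v))
    return b''.join(lines)

def decode_kv(blob, firstlinenonkeyval=False):
    lines = blob.splitlines()
    d = {}
    if firstlinenonkeyval:
        d[b'__firstline'] = lines.pop(0)
    d.update(l.split(b'=', 1) for l in lines if l.strip())
    return d

blob = encode_kv({b'state': b'ok', b'version': b'2'}, firstline=b'v1')
assert decode_kv(blob, firstlinenonkeyval=True) == {
    b'__firstline': b'v1', b'state': b'ok', b'version': b'2'}
```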
1953 1953
1954 1954 _reportobsoletedsource = [
1955 1955 b'debugobsolete',
1956 1956 b'pull',
1957 1957 b'push',
1958 1958 b'serve',
1959 1959 b'unbundle',
1960 1960 ]
1961 1961
1962 1962 _reportnewcssource = [
1963 1963 b'pull',
1964 1964 b'unbundle',
1965 1965 ]
1966 1966
1967 1967
1968 1968 def prefetchfiles(repo, revmatches):
1969 1969 """Invokes the registered file prefetch functions, allowing extensions to
1970 1970 ensure the corresponding files are available locally, before the command
1971 1971 uses them.
1972 1972
1973 1973 Args:
1974 1974 revmatches: a list of (revision, match) tuples to indicate the files to
1975 1975 fetch at each revision. If any of the match elements is None, it matches
1976 1976 all files.
1977 1977 """
1978 1978
1979 1979 def _matcher(m):
1980 1980 if m:
1981 1981 assert isinstance(m, matchmod.basematcher)
1982 1982 # The command itself will complain about files that don't exist, so
1983 1983 # don't duplicate the message.
1984 1984 return matchmod.badmatch(m, lambda fn, msg: None)
1985 1985 else:
1986 1986 return matchall(repo)
1987 1987
1988 1988 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1989 1989
1990 1990 fileprefetchhooks(repo, revbadmatches)
1991 1991
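The prefetch callbacks themselves live in the ``fileprefetchhooks`` registry declared just below. As a rough sketch of the pattern (this is a simplified stand-in, not the actual mercurial.util.hooks class), an extension registers a callable that receives the repo and the (rev, match) pairs:

```python
# Standalone sketch of the hook-list pattern used by fileprefetchhooks; the
# real registry is a mercurial.util.hooks instance with a similar shape.
class hooklist(object):
    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        for source, hook in sorted(self._hooks, key=lambda e: e[0]):
            hook(*args)

def myprefetch(repo, revmatches):
    # a real hook would fetch the files selected by each match at each rev
    print('prefetching for', len(revmatches), 'revision(s)')

fileprefetch = hooklist()
fileprefetch.add('myext', myprefetch)
fileprefetch('fake-repo', [(42, None)])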
1992 1992
1993 1993 # a list of (repo, revs, match) prefetch functions
1994 1994 fileprefetchhooks = util.hooks()
1995 1995
1996 1996 # A marker that tells the evolve extension to suppress its own reporting
1997 1997 _reportstroubledchangesets = True
1998 1998
1999 1999
2000 2000 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2001 2001 """register a callback to issue a summary after the transaction is closed
2002 2002
2003 2003 If as_validator is true, then the callbacks are registered as transaction
2004 2004 validators instead
2005 2005 """
2006 2006
2007 2007 def txmatch(sources):
2008 2008 return any(txnname.startswith(source) for source in sources)
2009 2009
2010 2010 categories = []
2011 2011
2012 2012 def reportsummary(func):
2013 2013 """decorator for report callbacks."""
2014 2014 # The repoview life cycle is shorter than the one of the actual
2015 2015 # underlying repository. So the filtered object can die before the
2016 2016 # weakref is used leading to troubles. We keep a reference to the
2017 2017 # unfiltered object and restore the filtering when retrieving the
2018 2018 # repository through the weakref.
2019 2019 filtername = repo.filtername
2020 2020 reporef = weakref.ref(repo.unfiltered())
2021 2021
2022 2022 def wrapped(tr):
2023 2023 repo = reporef()
2024 2024 if filtername:
2025 2025 assert repo is not None # help pytype
2026 2026 repo = repo.filtered(filtername)
2027 2027 func(repo, tr)
2028 2028
2029 2029 newcat = b'%02i-txnreport' % len(categories)
2030 2030 if as_validator:
2031 2031 otr.addvalidator(newcat, wrapped)
2032 2032 else:
2033 2033 otr.addpostclose(newcat, wrapped)
2034 2034 categories.append(newcat)
2035 2035 return wrapped
2036 2036
2037 2037 @reportsummary
2038 2038 def reportchangegroup(repo, tr):
2039 2039 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2040 2040 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2041 2041 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2042 2042 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2043 2043 if cgchangesets or cgrevisions or cgfiles:
2044 2044 htext = b""
2045 2045 if cgheads:
2046 2046 htext = _(b" (%+d heads)") % cgheads
2047 2047 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2048 2048 if as_validator:
2049 2049 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2050 2050 assert repo is not None # help pytype
2051 2051 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2052 2052
2053 2053 if txmatch(_reportobsoletedsource):
2054 2054
2055 2055 @reportsummary
2056 2056 def reportobsoleted(repo, tr):
2057 2057 obsoleted = obsutil.getobsoleted(repo, tr)
2058 2058 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2059 2059 if newmarkers:
2060 2060 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2061 2061 if obsoleted:
2062 2062 msg = _(b'obsoleted %i changesets\n')
2063 2063 if as_validator:
2064 2064 msg = _(b'obsoleting %i changesets\n')
2065 2065 repo.ui.status(msg % len(obsoleted))
2066 2066
2067 2067 if obsolete.isenabled(
2068 2068 repo, obsolete.createmarkersopt
2069 2069 ) and repo.ui.configbool(
2070 2070 b'experimental', b'evolution.report-instabilities'
2071 2071 ):
2072 2072 instabilitytypes = [
2073 2073 (b'orphan', b'orphan'),
2074 2074 (b'phase-divergent', b'phasedivergent'),
2075 2075 (b'content-divergent', b'contentdivergent'),
2076 2076 ]
2077 2077
2078 2078 def getinstabilitycounts(repo):
2079 2079 filtered = repo.changelog.filteredrevs
2080 2080 counts = {}
2081 2081 for instability, revset in instabilitytypes:
2082 2082 counts[instability] = len(
2083 2083 set(obsolete.getrevs(repo, revset)) - filtered
2084 2084 )
2085 2085 return counts
2086 2086
2087 2087 oldinstabilitycounts = getinstabilitycounts(repo)
2088 2088
2089 2089 @reportsummary
2090 2090 def reportnewinstabilities(repo, tr):
2091 2091 newinstabilitycounts = getinstabilitycounts(repo)
2092 2092 for instability, revset in instabilitytypes:
2093 2093 delta = (
2094 2094 newinstabilitycounts[instability]
2095 2095 - oldinstabilitycounts[instability]
2096 2096 )
2097 2097 msg = getinstabilitymessage(delta, instability)
2098 2098 if msg:
2099 2099 repo.ui.warn(msg)
2100 2100
2101 2101 if txmatch(_reportnewcssource):
2102 2102
2103 2103 @reportsummary
2104 2104 def reportnewcs(repo, tr):
2105 2105 """Report the range of new revisions pulled/unbundled."""
2106 2106 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2107 2107 unfi = repo.unfiltered()
2108 2108 if origrepolen >= len(unfi):
2109 2109 return
2110 2110
2111 2111 # Compute the bounds of new visible revisions' range.
2112 2112 revs = smartset.spanset(repo, start=origrepolen)
2113 2113 if revs:
2114 2114 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2115 2115
2116 2116 if minrev == maxrev:
2117 2117 revrange = minrev
2118 2118 else:
2119 2119 revrange = b'%s:%s' % (minrev, maxrev)
2120 2120 draft = len(repo.revs(b'%ld and draft()', revs))
2121 2121 secret = len(repo.revs(b'%ld and secret()', revs))
2122 2122 if not (draft or secret):
2123 2123 msg = _(b'new changesets %s\n') % revrange
2124 2124 elif draft and secret:
2125 2125 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2126 2126 msg %= (revrange, draft, secret)
2127 2127 elif draft:
2128 2128 msg = _(b'new changesets %s (%d drafts)\n')
2129 2129 msg %= (revrange, draft)
2130 2130 elif secret:
2131 2131 msg = _(b'new changesets %s (%d secrets)\n')
2132 2132 msg %= (revrange, secret)
2133 2133 else:
2134 2134 errormsg = b'entered unreachable condition'
2135 2135 raise error.ProgrammingError(errormsg)
2136 2136 repo.ui.status(msg)
2137 2137
2138 2138 # search new changesets directly pulled as obsolete
2139 2139 duplicates = tr.changes.get(b'revduplicates', ())
2140 2140 obsadded = unfi.revs(
2141 2141 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2142 2142 )
2143 2143 cl = repo.changelog
2144 2144 extinctadded = [r for r in obsadded if r not in cl]
2145 2145 if extinctadded:
2146 2146 # They are not just obsolete, but obsolete and invisible
2147 2147 # we call them "extinct" internally but the terms have not been
2148 2148 # exposed to users.
2149 2149 msg = b'(%d other changesets obsolete on arrival)\n'
2150 2150 repo.ui.status(msg % len(extinctadded))
2151 2151
2152 2152 @reportsummary
2153 2153 def reportphasechanges(repo, tr):
2154 2154 """Report statistics of phase changes for changesets pre-existing
2155 2155 pull/unbundle.
2156 2156 """
2157 2157 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2158 2158 published = []
2159 2159 for revs, (old, new) in tr.changes.get(b'phases', []):
2160 2160 if new != phases.public:
2161 2161 continue
2162 2162 published.extend(rev for rev in revs if rev < origrepolen)
2163 2163 if not published:
2164 2164 return
2165 2165 msg = _(b'%d local changesets published\n')
2166 2166 if as_validator:
2167 2167 msg = _(b'%d local changesets will be published\n')
2168 2168 repo.ui.status(msg % len(published))
2169 2169
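The report callbacks above deliberately capture only a weak reference to the unfiltered repository (see the comment in ``reportsummary``). A minimal standalone illustration of that pattern, with a throwaway ``Repo`` class as the stand-in object:

```python
import weakref

class Repo(object):
    filtername = b'visible'

repo = Repo()
reporef = weakref.ref(repo)   # the callback captures this, not the repo itself
assert reporef() is repo      # dereference while the repo is alive

del repo
print(reporef())              # None once collected (immediately on CPython),
                              # so callbacks must re-check before using it
```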
2170 2170
2171 2171 def getinstabilitymessage(delta, instability):
2172 2172 """function to return the message to show warning about new instabilities
2173 2173
2174 2174 exists as a separate function so that extension can wrap to show more
2175 2175 information like how to fix instabilities"""
2176 2176 if delta > 0:
2177 2177 return _(b'%i new %s changesets\n') % (delta, instability)
2178 2178
2179 2179
2180 2180 def nodesummaries(repo, nodes, maxnumnodes=4):
2181 2181 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2182 2182 return b' '.join(short(h) for h in nodes)
2183 2183 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2184 2184 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2185 2185
2186 2186
2187 2187 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2188 2188 """check that no named branch has multiple heads"""
2189 2189 if desc in (b'strip', b'repair'):
2190 2190 # skip the logic during strip
2191 2191 return
2192 2192 visible = repo.filtered(b'visible')
2193 2193 # possible improvement: we could restrict the check to affected branch
2194 2194 bm = visible.branchmap()
2195 2195 for name in bm:
2196 2196 heads = bm.branchheads(name, closed=accountclosed)
2197 2197 if len(heads) > 1:
2198 2198 msg = _(b'rejecting multiple heads on branch "%s"')
2199 2199 msg %= name
2200 2200 hint = _(b'%d heads: %s')
2201 2201 hint %= (len(heads), nodesummaries(repo, heads))
2202 2202 raise error.Abort(msg, hint=hint)
2203 2203
2204 2204
2205 2205 def wrapconvertsink(sink):
2206 2206 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2207 2207 before it is used, whether or not the convert extension was formally loaded.
2208 2208 """
2209 2209 return sink
2210 2210
2211 2211
2212 2212 def unhidehashlikerevs(repo, specs, hiddentype):
2213 2213 """parse the user specs and unhide changesets whose hash or revision number
2214 2214 is passed.
2215 2215
2216 2216 hiddentype can be: 1) 'warn': warn while unhiding changesets
2217 2217 2) 'nowarn': don't warn while unhiding changesets
2218 2218
2219 2219 returns a repo object with the required changesets unhidden
2220 2220 """
2221 2221 if not repo.filtername or not repo.ui.configbool(
2222 2222 b'experimental', b'directaccess'
2223 2223 ):
2224 2224 return repo
2225 2225
2226 2226 if repo.filtername not in (b'visible', b'visible-hidden'):
2227 2227 return repo
2228 2228
2229 2229 symbols = set()
2230 2230 for spec in specs:
2231 2231 try:
2232 2232 tree = revsetlang.parse(spec)
2233 2233 except error.ParseError: # will be reported by scmutil.revrange()
2234 2234 continue
2235 2235
2236 2236 symbols.update(revsetlang.gethashlikesymbols(tree))
2237 2237
2238 2238 if not symbols:
2239 2239 return repo
2240 2240
2241 2241 revs = _getrevsfromsymbols(repo, symbols)
2242 2242
2243 2243 if not revs:
2244 2244 return repo
2245 2245
2246 2246 if hiddentype == b'warn':
2247 2247 unfi = repo.unfiltered()
2248 2248 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2249 2249 repo.ui.warn(
2250 2250 _(
2251 2251 b"warning: accessing hidden changesets for write "
2252 2252 b"operation: %s\n"
2253 2253 )
2254 2254 % revstr
2255 2255 )
2256 2256
2257 2257 # we have to use a new filtername to separate the branch/tags caches until we
2258 2258 # can disable these caches when revisions are dynamically pinned.
2259 2259 return repo.filtered(b'visible-hidden', revs)
2260 2260
2261 2261
2262 2262 def _getrevsfromsymbols(repo, symbols):
2263 2263 """parse the list of symbols and returns a set of revision numbers of hidden
2264 2264 changesets present in symbols"""
2265 2265 revs = set()
2266 2266 unfi = repo.unfiltered()
2267 2267 unficl = unfi.changelog
2268 2268 cl = repo.changelog
2269 2269 tiprev = len(unficl)
2270 2270 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2271 2271 for s in symbols:
2272 2272 try:
2273 2273 n = int(s)
2274 2274 if n <= tiprev:
2275 2275 if not allowrevnums:
2276 2276 continue
2277 2277 else:
2278 2278 if n not in cl:
2279 2279 revs.add(n)
2280 2280 continue
2281 2281 except ValueError:
2282 2282 pass
2283 2283
2284 2284 try:
2285 2285 s = resolvehexnodeidprefix(unfi, s)
2286 2286 except (error.LookupError, error.WdirUnsupported):
2287 2287 s = None
2288 2288
2289 2289 if s is not None:
2290 2290 rev = unficl.rev(s)
2291 2291 if rev not in cl:
2292 2292 revs.add(rev)
2293 2293
2294 2294 return revs
2295 2295
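A standalone sketch of the classification ``_getrevsfromsymbols`` performs on each hash-like symbol: integers are candidate revision numbers (honoured only when ``experimental.directaccess.revnums`` is enabled), everything else is tried as a hex node prefix. The helper below mirrors only the common branching and does not consult a repository.

```python
import string

def classify_symbol(symbol, allowrevnums=True):
    """Return ('revnum', n), ('hexprefix', s) or None for a user symbol."""
    try:
        n = int(symbol)
    except ValueError:
        n = None
    if n is not None:
        return ('revnum', n) if allowrevnums else None
    if symbol and all(c in string.hexdigits for c in symbol):
        return ('hexprefix', symbol.lower())
    return None

print(classify_symbol('42'))            # ('revnum', 42)
print(classify_symbol('c19d34741b0a'))  # ('hexprefix', 'c19d34741b0a')
print(classify_symbol('tip'))           # None -- not hash-like
```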
2296 2296
2297 2297 def bookmarkrevs(repo, mark):
2298 2298 """
2299 2299 Select revisions reachable by a given bookmark
2300 2300 """
2301 2301 return repo.revs(
2302 2302 b"ancestors(bookmark(%s)) - "
2303 2303 b"ancestors(head() and not bookmark(%s)) - "
2304 2304 b"ancestors(bookmark() and not bookmark(%s))",
2305 2305 mark,
2306 2306 mark,
2307 2307 mark,
2308 2308 )
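A hedged usage sketch of ``bookmarkrevs()``: it assumes an already-loaded ``repo`` object with the function in scope, and is only meant to show how the returned revset is typically consumed.

```python
# Hedged usage sketch: iterate the revisions reachable only via a bookmark.
def print_bookmark_revs(repo, mark):
    for rev in bookmarkrevs(repo, mark):
        ctx = repo[rev]
        firstline = (ctx.description().splitlines() or [b''])[0]
        repo.ui.write(b'%d:%s %s\n' % (rev, ctx.hex()[:12], firstline))
```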
@@ -1,332 +1,332 b''
1 1 $ hg init a
2 2 $ cd a
3 3 $ echo a > a
4 4 $ hg add -n
5 5 adding a
6 6 $ hg st
7 7 ? a
8 8 $ hg add
9 9 adding a
10 10 $ hg st
11 11 A a
12 12 $ hg forget a
13 13 $ hg add
14 14 adding a
15 15 $ hg forget a
16 16 $ hg add --color debug
17 17 [ui.addremove.added ui.status|adding a]
18 18 $ hg st
19 19 A a
20 20 $ mkdir dir
21 21 $ cd dir
22 22 $ hg add ../a
23 23 ../a already tracked!
24 24 $ cd ..
25 25
26 26 $ echo b > b
27 27 $ hg add -n b
28 28 $ hg st
29 29 A a
30 30 ? b
31 31 $ hg add b
32 32 $ hg st
33 33 A a
34 34 A b
35 35
36 36 should fail
37 37
38 38 $ hg add b
39 39 b already tracked!
40 40 $ hg st
41 41 A a
42 42 A b
43 43
44 44 #if no-windows
45 45 $ echo foo > con.xml
46 46 $ hg --config ui.portablefilenames=jump add con.xml
47 47 abort: ui.portablefilenames value is invalid ('jump')
48 48 [30]
49 49 $ hg --config ui.portablefilenames=abort add con.xml
50 50 abort: filename contains 'con', which is reserved on Windows: con.xml
51 [255]
51 [10]
52 52 $ hg st
53 53 A a
54 54 A b
55 55 ? con.xml
56 56 $ hg add con.xml
57 57 warning: filename contains 'con', which is reserved on Windows: con.xml
58 58 $ hg st
59 59 A a
60 60 A b
61 61 A con.xml
62 62 $ hg forget con.xml
63 63 $ rm con.xml
64 64 #endif
65 65
66 66 #if eol-in-paths
67 67 $ echo bla > 'hello:world'
68 68 $ hg --config ui.portablefilenames=abort add
69 69 adding hello:world
70 70 abort: filename contains ':', which is reserved on Windows: 'hello:world'
71 [255]
71 [10]
72 72 $ hg st
73 73 A a
74 74 A b
75 75 ? hello:world
76 76 $ hg --config ui.portablefilenames=ignore add
77 77 adding hello:world
78 78 $ hg st
79 79 A a
80 80 A b
81 81 A hello:world
82 82 #endif
83 83
84 84 $ hg ci -m 0 --traceback
85 85
86 86 $ hg log -r "heads(. or wdir() & file('**'))"
87 87 changeset: 0:* (glob)
88 88 tag: tip
89 89 user: test
90 90 date: Thu Jan 01 00:00:00 1970 +0000
91 91 summary: 0
92 92
93 93 should fail
94 94
95 95 $ hg add a
96 96 a already tracked!
97 97
98 98 $ echo aa > a
99 99 $ hg ci -m 1
100 100 $ hg up 0
101 101 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
102 102 $ echo aaa > a
103 103 $ hg ci -m 2
104 104 created new head
105 105
106 106 $ hg merge
107 107 merging a
108 108 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
109 109 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
110 110 use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
111 111 [1]
112 112 $ hg st
113 113 M a
114 114 ? a.orig
115 115
116 116 wdir doesn't cause a crash, and can be dynamically selected if dirty
117 117
118 118 $ hg log -r "heads(. or wdir() & file('**'))"
119 119 changeset: 2147483647:ffffffffffff
120 120 parent: 2:* (glob)
121 121 parent: 1:* (glob)
122 122 user: test
123 123 date: * (glob)
124 124
125 125 should fail
126 126
127 127 $ hg add a
128 128 a already tracked!
129 129 $ hg st
130 130 M a
131 131 ? a.orig
132 132 $ hg resolve -m a
133 133 (no more unresolved files)
134 134 $ hg ci -m merge
135 135
136 136 Issue683: peculiarity with hg revert of a removed then added file
137 137
138 138 $ hg forget a
139 139 $ hg add a
140 140 $ hg st
141 141 ? a.orig
142 142 $ hg rm a
143 143 $ hg st
144 144 R a
145 145 ? a.orig
146 146 $ echo a > a
147 147 $ hg add a
148 148 $ hg st
149 149 M a
150 150 ? a.orig
151 151
152 152 excluded file shouldn't be added even if it is explicitly specified
153 153
154 154 $ hg add a.orig -X '*.orig'
155 155 $ hg st
156 156 M a
157 157 ? a.orig
158 158
159 159 Forgotten file can be added back (as either clean or modified)
160 160
161 161 $ hg forget b
162 162 $ hg add b
163 163 $ hg st -A b
164 164 C b
165 165 $ hg forget b
166 166 $ echo modified > b
167 167 $ hg add b
168 168 $ hg st -A b
169 169 M b
170 170 $ hg revert -qC b
171 171
172 172 $ hg add c && echo "unexpected addition of missing file"
173 173 c: * (glob)
174 174 [1]
175 175 $ echo c > c
176 176 $ hg add d c && echo "unexpected addition of missing file"
177 177 d: * (glob)
178 178 [1]
179 179 $ hg st
180 180 M a
181 181 A c
182 182 ? a.orig
183 183 $ hg up -C
184 184 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
185 185
186 186 forget and get should have the right order: added but missing dir should be
187 187 forgotten before file with same name is added
188 188
189 189 $ echo file d > d
190 190 $ hg add d
191 191 $ hg ci -md
192 192 $ hg rm d
193 193 $ mkdir d
194 194 $ echo a > d/a
195 195 $ hg add d/a
196 196 $ rm -r d
197 197 $ hg up -C
198 198 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 199 $ cat d
200 200 file d
201 201
202 202 Test that adding a directory doesn't require case matching (issue4578)
203 203 #if icasefs
204 204 $ mkdir -p CapsDir1/CapsDir
205 205 $ echo abc > CapsDir1/CapsDir/AbC.txt
206 206 $ mkdir CapsDir1/CapsDir/SubDir
207 207 $ echo def > CapsDir1/CapsDir/SubDir/Def.txt
208 208
209 209 $ hg add capsdir1/capsdir
210 210 adding CapsDir1/CapsDir/AbC.txt
211 211 adding CapsDir1/CapsDir/SubDir/Def.txt
212 212
213 213 $ hg forget capsdir1/capsdir/abc.txt
214 214
215 215 $ hg forget capsdir1/capsdir
216 216 removing CapsDir1/CapsDir/SubDir/Def.txt
217 217
218 218 $ hg add capsdir1
219 219 adding CapsDir1/CapsDir/AbC.txt
220 220 adding CapsDir1/CapsDir/SubDir/Def.txt
221 221
222 222 $ hg ci -m "AbCDef" capsdir1/capsdir
223 223
224 224 $ hg status -A capsdir1/capsdir
225 225 C CapsDir1/CapsDir/AbC.txt
226 226 C CapsDir1/CapsDir/SubDir/Def.txt
227 227
228 228 $ hg files capsdir1/capsdir
229 229 CapsDir1/CapsDir/AbC.txt
230 230 CapsDir1/CapsDir/SubDir/Def.txt
231 231
232 232 $ echo xyz > CapsDir1/CapsDir/SubDir/Def.txt
233 233 $ hg ci -m xyz capsdir1/capsdir/subdir/def.txt
234 234
235 235 $ hg revert -r '.^' capsdir1/capsdir
236 236 reverting CapsDir1/CapsDir/SubDir/Def.txt
237 237
238 238 The conditional tests above mean the hash on the diff line differs on Windows
239 239 and OS X
240 240 $ hg diff capsdir1/capsdir
241 241 diff -r * CapsDir1/CapsDir/SubDir/Def.txt (glob)
242 242 --- a/CapsDir1/CapsDir/SubDir/Def.txt Thu Jan 01 00:00:00 1970 +0000
243 243 +++ b/CapsDir1/CapsDir/SubDir/Def.txt * (glob)
244 244 @@ -1,1 +1,1 @@
245 245 -xyz
246 246 +def
247 247
248 248 $ hg mv CapsDir1/CapsDir/abc.txt CapsDir1/CapsDir/ABC.txt
249 249 $ hg ci -m "case changing rename" CapsDir1/CapsDir/AbC.txt CapsDir1/CapsDir/ABC.txt
250 250
251 251 $ hg status -A capsdir1/capsdir
252 252 M CapsDir1/CapsDir/SubDir/Def.txt
253 253 C CapsDir1/CapsDir/ABC.txt
254 254
255 255 $ hg remove -f 'glob:**.txt' -X capsdir1/capsdir
256 256 $ hg remove -f 'glob:**.txt' -I capsdir1/capsdir
257 257 removing CapsDir1/CapsDir/ABC.txt
258 258 removing CapsDir1/CapsDir/SubDir/Def.txt
259 259 #endif
260 260
261 261 $ cd ..
262 262
263 263 test --dry-run mode in forget
264 264
265 265 $ hg init testdir_forget
266 266 $ cd testdir_forget
267 267 $ echo foo > foo
268 268 $ hg add foo
269 269 $ hg commit -m "foo"
270 270 $ hg forget foo --dry-run -v
271 271 removing foo
272 272 $ hg diff
273 273 $ hg forget not_exist -n
274 274 not_exist: $ENOENT$
275 275 [1]
276 276
277 277 $ cd ..
278 278
279 279 test --interactive mode in forget
280 280
281 281 $ hg init interactiveforget
282 282 $ cd interactiveforget
283 283 $ echo foo > foo
284 284 $ hg commit -qAm "foo"
285 285 $ echo bar > bar
286 286 $ hg commit -qAm "bar"
287 287 $ hg forget foo --dry-run -i
288 288 abort: cannot specify both --dry-run and --interactive
289 289 [10]
290 290
291 291 $ hg forget foo --config ui.interactive=True -i << EOF
292 292 > ?
293 293 > n
294 294 > EOF
295 295 forget foo [Ynsa?] ?
296 296 y - yes, forget this file
297 297 n - no, skip this file
298 298 s - skip remaining files
299 299 a - include all remaining files
300 300 ? - ? (display help)
301 301 forget foo [Ynsa?] n
302 302
303 303 $ hg forget foo bar --config ui.interactive=True -i << EOF
304 304 > y
305 305 > n
306 306 > EOF
307 307 forget bar [Ynsa?] y
308 308 forget foo [Ynsa?] n
309 309 removing bar
310 310 $ hg status
311 311 R bar
312 312 $ hg up -qC .
313 313
314 314 $ hg forget foo bar --config ui.interactive=True -i << EOF
315 315 > s
316 316 > EOF
317 317 forget bar [Ynsa?] s
318 318 $ hg st
319 319 $ hg up -qC .
320 320
321 321 $ hg forget foo bar --config ui.interactive=True -i << EOF
322 322 > a
323 323 > EOF
324 324 forget bar [Ynsa?] a
325 325 removing bar
326 326 removing foo
327 327 $ hg status
328 328 R bar
329 329 R foo
330 330 $ hg up -qC .
331 331
332 332 $ cd ..
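The ``ui.portablefilenames`` cases in the test above revolve around names that are reserved on Windows (such as ``con``). The regex below is a standalone approximation of that check, not Mercurial's actual implementation (the in-tree check, ``util.checkwinfilename``, covers more cases such as trailing dots/spaces and control characters).

```python
import re

# Standalone approximation of the Windows reserved-name check.
_winreserved = re.compile(
    r'^(con|prn|aux|nul|com[1-9]|lpt[1-9])(\..*)?$', re.IGNORECASE)

def is_reserved_on_windows(component):
    return bool(_winreserved.match(component))

print(is_reserved_on_windows('con.xml'))      # True  -> warn or abort
print(is_reserved_on_windows('console.xml'))  # False
```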
@@ -1,374 +1,374 b''
1 1 $ mkdir part1
2 2 $ cd part1
3 3
4 4 $ hg init
5 5 $ echo a > a
6 6 $ hg add a
7 7 $ hg commit -m "1"
8 8 $ hg status
9 9 $ hg copy a b
10 10 $ hg --config ui.portablefilenames=abort copy a con.xml
11 11 abort: filename contains 'con', which is reserved on Windows: con.xml
12 [255]
12 [10]
13 13 $ hg status
14 14 A b
15 15 $ hg sum
16 16 parent: 0:c19d34741b0a tip
17 17 1
18 18 branch: default
19 19 commit: 1 copied
20 20 update: (current)
21 21 phases: 1 draft
22 22 $ hg --debug commit -m "2"
23 23 committing files:
24 24 b
25 25 b: copy a:b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
26 26 committing manifest
27 27 committing changelog
28 28 updating the branch cache
29 29 committed changeset 1:93580a2c28a50a56f63526fb305067e6fbf739c4
30 30
31 31 we should see two history entries
32 32
33 33 $ hg history -v
34 34 changeset: 1:93580a2c28a5
35 35 tag: tip
36 36 user: test
37 37 date: Thu Jan 01 00:00:00 1970 +0000
38 38 files: b
39 39 description:
40 40 2
41 41
42 42
43 43 changeset: 0:c19d34741b0a
44 44 user: test
45 45 date: Thu Jan 01 00:00:00 1970 +0000
46 46 files: a
47 47 description:
48 48 1
49 49
50 50
51 51
52 52 we should see one log entry for a
53 53
54 54 $ hg log a
55 55 changeset: 0:c19d34741b0a
56 56 user: test
57 57 date: Thu Jan 01 00:00:00 1970 +0000
58 58 summary: 1
59 59
60 60
61 61 this should show a revision linked to changeset 0
62 62
63 63 $ hg debugindex a
64 64 rev linkrev nodeid p1 p2
65 65 0 0 b789fdd96dc2 000000000000 000000000000
66 66
67 67 we should see one log entry for b
68 68
69 69 $ hg log b
70 70 changeset: 1:93580a2c28a5
71 71 tag: tip
72 72 user: test
73 73 date: Thu Jan 01 00:00:00 1970 +0000
74 74 summary: 2
75 75
76 76
77 77 this should show a revision linked to changeset 1
78 78
79 79 $ hg debugindex b
80 80 rev linkrev nodeid p1 p2
81 81 0 1 37d9b5d994ea 000000000000 000000000000
82 82
83 83 this should show the rename information in the metadata
84 84
85 85 $ hg debugdata b 0 | head -3 | tail -2
86 86 copy: a
87 87 copyrev: b789fdd96dc2f3bd229c1dd8eedf0fc60e2b68e3
88 88
89 89 #if reporevlogstore
90 90 $ md5sum.py .hg/store/data/b.i
91 91 44913824c8f5890ae218f9829535922e .hg/store/data/b.i
92 92 #endif
93 93 $ hg cat b > bsum
94 94 $ md5sum.py bsum
95 95 60b725f10c9c85c70d97880dfe8191b3 bsum
96 96 $ hg cat a > asum
97 97 $ md5sum.py asum
98 98 60b725f10c9c85c70d97880dfe8191b3 asum
99 99 $ hg verify
100 100 checking changesets
101 101 checking manifests
102 102 crosschecking files in changesets and manifests
103 103 checking files
104 104 checked 2 changesets with 2 changes to 2 files
105 105
106 106 $ cd ..
107 107
108 108
109 109 $ mkdir part2
110 110 $ cd part2
111 111
112 112 $ hg init
113 113 $ echo foo > foo
114 114 should fail - foo is not managed
115 115 $ hg mv foo bar
116 116 foo: not copying - file is not managed
117 117 abort: no files to copy
118 118 [10]
119 119 $ hg st -A
120 120 ? foo
121 121 respects ui.relative-paths
122 122 $ mkdir dir
123 123 $ cd dir
124 124 $ hg mv ../foo ../bar
125 125 ../foo: not copying - file is not managed
126 126 abort: no files to copy
127 127 [10]
128 128 $ hg mv ../foo ../bar --config ui.relative-paths=yes
129 129 ../foo: not copying - file is not managed
130 130 abort: no files to copy
131 131 [10]
132 132 $ hg mv ../foo ../bar --config ui.relative-paths=no
133 133 foo: not copying - file is not managed
134 134 abort: no files to copy
135 135 [10]
136 136 $ cd ..
137 137 $ rmdir dir
138 138 $ hg add foo
139 139 dry-run; print a warning that this is not a real copy; foo is added
140 140 $ hg mv --dry-run foo bar
141 141 foo has not been committed yet, so no copy data will be stored for bar.
142 142 $ hg st -A
143 143 A foo
144 144 should print a warning that this is not a real copy; bar is added
145 145 $ hg mv foo bar
146 146 foo has not been committed yet, so no copy data will be stored for bar.
147 147 $ hg st -A
148 148 A bar
149 149 should print a warning that this is not a real copy; foo is added
150 150 $ hg cp bar foo
151 151 bar has not been committed yet, so no copy data will be stored for foo.
152 152 $ hg rm -f bar
153 153 $ rm bar
154 154 $ hg st -A
155 155 A foo
156 156 $ hg commit -m1
157 157
158 158 moving a missing file
159 159 $ rm foo
160 160 $ hg mv foo foo3
161 161 foo: deleted in working directory
162 162 foo3 does not exist!
163 163 $ hg up -qC .
164 164
165 165 copy --after to a nonexistent target filename
166 166 $ hg cp -A foo dummy
167 167 foo: not recording copy - dummy does not exist
168 168 [1]
169 169
170 170 dry-run; should show that foo is clean
171 171 $ hg copy --dry-run foo bar
172 172 $ hg st -A
173 173 C foo
174 174 should show copy
175 175 $ hg copy foo bar
176 176 $ hg st -C
177 177 A bar
178 178 foo
179 179
180 180 shouldn't show copy
181 181 $ hg commit -m2
182 182 $ hg st -C
183 183
184 184 should match
185 185 $ hg debugindex foo
186 186 rev linkrev nodeid p1 p2
187 187 0 0 2ed2a3912a0b 000000000000 000000000000
188 188 $ hg debugrename bar
189 189 bar renamed from foo:2ed2a3912a0b24502043eae84ee4b279c18b90dd
190 190
191 191 $ echo bleah > foo
192 192 $ echo quux > bar
193 193 $ hg commit -m3
194 194
195 195 should not be renamed
196 196 $ hg debugrename bar
197 197 bar not renamed
198 198
199 199 $ hg copy -f foo bar
200 200 should show copy
201 201 $ hg st -C
202 202 M bar
203 203 foo
204 204
205 205 XXX: filtering lfilesrepo.status() in 3.3-rc causes the copy source to not be
206 206 displayed.
207 207 $ hg st -C --config extensions.largefiles=
208 208 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
209 209 M bar
210 210 foo
211 211
212 212 $ hg commit -m3
213 213
214 214 should show no parents for tip
215 215 $ hg debugindex bar
216 216 rev linkrev nodeid p1 p2
217 217 0 1 7711d36246cc 000000000000 000000000000
218 218 1 2 bdf70a2b8d03 7711d36246cc 000000000000
219 219 2 3 b2558327ea8d 000000000000 000000000000
220 220 should match
221 221 $ hg debugindex foo
222 222 rev linkrev nodeid p1 p2
223 223 0 0 2ed2a3912a0b 000000000000 000000000000
224 224 1 2 dd12c926cf16 2ed2a3912a0b 000000000000
225 225 $ hg debugrename bar
226 226 bar renamed from foo:dd12c926cf165e3eb4cf87b084955cb617221c17
227 227
228 228 should show no copies
229 229 $ hg st -C
230 230
231 231 copy --after on an added file
232 232 $ cp bar baz
233 233 $ hg add baz
234 234 $ hg cp -A bar baz
235 235 $ hg st -C
236 236 A baz
237 237 bar
238 238
239 239 foo was clean:
240 240 $ hg st -AC foo
241 241 C foo
242 242 Trying to copy on top of an existing file fails,
243 243 $ hg copy -A bar foo
244 244 foo: not overwriting - file already committed
245 245 ('hg copy --after --force' to replace the file by recording a copy)
246 246 [1]
247 247 same error without the --after, so the user doesn't have to go through
248 248 two hints:
249 249 $ hg copy bar foo
250 250 foo: not overwriting - file already committed
251 251 ('hg copy --force' to replace the file by recording a copy)
252 252 [1]
253 253 but it's considered modified after a copy --after --force
254 254 $ hg copy -Af bar foo
255 255 $ hg st -AC foo
256 256 M foo
257 257 bar
258 258 The hint for a file that exists but is not in file history doesn't
259 259 mention --force:
260 260 $ touch xyzzy
261 261 $ hg cp bar xyzzy
262 262 xyzzy: not overwriting - file exists
263 263 ('hg copy --after' to record the copy)
264 264 [1]
265 265 $ hg co -qC .
266 266 $ rm baz xyzzy
267 267
268 268
269 269 Test unmarking copy of a single file
270 270
271 271 # Set up by creating a copy
272 272 $ hg cp bar baz
273 273 # Test uncopying a non-existent file
274 274 $ hg copy --forget non-existent
275 275 non-existent: $ENOENT$
276 276 # Test uncopying a tracked but unrelated file
277 277 $ hg copy --forget foo
278 278 foo: not unmarking as copy - file is not marked as copied
279 279 # Test uncopying a copy source
280 280 $ hg copy --forget bar
281 281 bar: not unmarking as copy - file is not marked as copied
282 282 # baz should still be marked as a copy
283 283 $ hg st -C
284 284 A baz
285 285 bar
286 286 # Test the normal case
287 287 $ hg copy --forget baz
288 288 $ hg st -C
289 289 A baz
290 290 # Test uncopy with matching and non-matching patterns
291 291 $ hg cp bar baz --after
292 292 $ hg copy --forget bar baz
293 293 bar: not unmarking as copy - file is not marked as copied
294 294 $ hg st -C
295 295 A baz
296 296 # Test uncopy with no exact matches
297 297 $ hg cp bar baz --after
298 298 $ hg copy --forget .
299 299 $ hg st -C
300 300 A baz
301 301 $ hg forget baz
302 302 $ rm baz
303 303
304 304 Test unmarking copy of a directory
305 305
306 306 $ mkdir dir
307 307 $ echo foo > dir/foo
308 308 $ echo bar > dir/bar
309 309 $ hg add dir
310 310 adding dir/bar
311 311 adding dir/foo
312 312 $ hg ci -m 'add dir/'
313 313 $ hg cp dir dir2
314 314 copying dir/bar to dir2/bar
315 315 copying dir/foo to dir2/foo
316 316 $ touch dir2/untracked
317 317 $ hg copy --forget dir2
318 318 $ hg st -C
319 319 A dir2/bar
320 320 A dir2/foo
321 321 ? dir2/untracked
322 322 # Clean up for next test
323 323 $ hg forget dir2
324 324 removing dir2/bar
325 325 removing dir2/foo
326 326 $ rm -r dir2
327 327
328 328 Test uncopy on committed copies
329 329
330 330 # Commit some copies
331 331 $ hg cp bar baz
332 332 $ hg cp bar qux
333 333 $ hg ci -m copies
334 334 $ hg st -C --change .
335 335 A baz
336 336 bar
337 337 A qux
338 338 bar
339 339 $ base=$(hg log -r '.^' -T '{rev}')
340 340 $ hg log -G -T '{rev}:{node|short} {desc}\n' -r $base:
341 341 @ 5:a612dc2edfda copies
342 342 |
343 343 o 4:4800b1f1f38e add dir/
344 344 |
345 345 ~
346 346 # Add a dirty change on top to show that it's unaffected
347 347 $ echo dirty >> baz
348 348 $ hg st
349 349 M baz
350 350 $ cat baz
351 351 bleah
352 352 dirty
353 353 $ hg copy --forget --at-rev . baz
354 354 saved backup bundle to $TESTTMP/part2/.hg/strip-backup/a612dc2edfda-e36b4448-uncopy.hg
355 355 # The unwanted copy is no longer recorded, but the unrelated one is
356 356 $ hg st -C --change .
357 357 A baz
358 358 A qux
359 359 bar
360 360 # The old commit is gone and we have updated to the new commit
361 361 $ hg log -G -T '{rev}:{node|short} {desc}\n' -r $base:
362 362 @ 5:c45090e5effe copies
363 363 |
364 364 o 4:4800b1f1f38e add dir/
365 365 |
366 366 ~
367 367 # Working copy still has the uncommitted change
368 368 $ hg st
369 369 M baz
370 370 $ cat baz
371 371 bleah
372 372 dirty
373 373
374 374 $ cd ..
@@ -1,80 +1,80 b''
1 1 #require eol-in-paths
2 2
3 3 https://bz.mercurial-scm.org/352
4 4
5 5 test issue352
6 6
7 7 $ hg init foo
8 8 $ cd foo
9 9 $ A=`printf 'he\rllo'`
10 10 $ echo foo > "$A"
11 11 $ hg add
12 12 adding he\r (no-eol) (esc)
13 13 llo
14 14 abort: '\n' and '\r' disallowed in filenames: 'he\rllo'
15 [255]
15 [10]
16 16 $ hg ci -A -m m
17 17 adding he\r (no-eol) (esc)
18 18 llo
19 19 abort: '\n' and '\r' disallowed in filenames: 'he\rllo'
20 [255]
20 [10]
21 21 $ rm "$A"
22 22 $ echo foo > "hell
23 23 > o"
24 24 $ hg add
25 25 adding hell
26 26 o
27 27 abort: '\n' and '\r' disallowed in filenames: 'hell\no'
28 [255]
28 [10]
29 29 $ hg ci -A -m m
30 30 adding hell
31 31 o
32 32 abort: '\n' and '\r' disallowed in filenames: 'hell\no'
33 [255]
33 [10]
34 34 $ echo foo > "$A"
35 35 $ hg debugwalk -v
36 36 * matcher:
37 37 <alwaysmatcher>
38 38 f he\r (no-eol) (esc)
39 39 llo he\r (no-eol) (esc)
40 40 llo
41 41 f hell
42 42 o hell
43 43 o
44 44
45 45 $ echo bla > quickfox
46 46 $ hg add quickfox
47 47 $ hg ci -m 2
48 48 $ A=`printf 'quick\rfox'`
49 49 $ hg cp quickfox "$A"
50 50 abort: '\n' and '\r' disallowed in filenames: 'quick\rfox'
51 [255]
51 [10]
52 52 $ hg mv quickfox "$A"
53 53 abort: '\n' and '\r' disallowed in filenames: 'quick\rfox'
54 [255]
54 [10]
55 55
56 56 https://bz.mercurial-scm.org/2036
57 57
58 58 $ cd ..
59 59
60 60 test issue2039
61 61
62 62 $ hg init bar
63 63 $ cd bar
64 64 $ cat <<EOF >> $HGRCPATH
65 65 > [extensions]
66 66 > color =
67 67 > [color]
68 68 > mode = ansi
69 69 > EOF
70 70 $ A=`printf 'foo\nbar'`
71 71 $ B=`printf 'foo\nbar.baz'`
72 72 $ touch "$A"
73 73 $ touch "$B"
74 74 $ hg status --color=always
75 75 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mfoo\x1b[0m (esc)
76 76 \x1b[0;35;1;4mbar\x1b[0m (esc)
77 77 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mfoo\x1b[0m (esc)
78 78 \x1b[0;35;1;4mbar.baz\x1b[0m (esc)
79 79
80 80 $ cd ..
@@ -1,698 +1,698 b''
1 1 $ hg init
2 2 $ mkdir d1 d1/d11 d2
3 3 $ echo d1/a > d1/a
4 4 $ echo d1/ba > d1/ba
5 5 $ echo d1/a1 > d1/d11/a1
6 6 $ echo d1/b > d1/b
7 7 $ echo d2/b > d2/b
8 8 $ hg add d1/a d1/b d1/ba d1/d11/a1 d2/b
9 9 $ hg commit -m "1"
10 10
11 11 rename a single file
12 12
13 13 $ hg rename d1/d11/a1 d2/c
14 14 $ hg --config ui.portablefilenames=abort rename d1/a d1/con.xml
15 15 abort: filename contains 'con', which is reserved on Windows: d1/con.xml
16 [255]
16 [10]
17 17 $ hg sum
18 18 parent: 0:9b4b6e7b2c26 tip
19 19 1
20 20 branch: default
21 21 commit: 1 renamed
22 22 update: (current)
23 23 phases: 1 draft
24 24 $ hg status -C
25 25 A d2/c
26 26 d1/d11/a1
27 27 R d1/d11/a1
28 28 $ hg update -C
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30 $ rm d2/c
31 31
32 32 rename a single file using absolute paths
33 33
34 34 $ hg rename `pwd`/d1/d11/a1 `pwd`/d2/c
35 35 $ hg status -C
36 36 A d2/c
37 37 d1/d11/a1
38 38 R d1/d11/a1
39 39 $ hg update -C
40 40 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
41 41 $ rm d2/c
42 42
43 43 rename --after a single file
44 44
45 45 $ mv d1/d11/a1 d2/c
46 46 $ hg rename --after d1/d11/a1 d2/c
47 47 $ hg status -C
48 48 A d2/c
49 49 d1/d11/a1
50 50 R d1/d11/a1
51 51 $ hg update -C
52 52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 53 $ rm d2/c
54 54
55 55 rename --after a single file when src and tgt already tracked
56 56
57 57 $ mv d1/d11/a1 d2/c
58 58 $ hg addrem -s 0
59 59 removing d1/d11/a1
60 60 adding d2/c
61 61 $ hg rename --after d1/d11/a1 d2/c
62 62 $ hg status -C
63 63 A d2/c
64 64 d1/d11/a1
65 65 R d1/d11/a1
66 66 $ hg update -C
67 67 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 68 $ rm d2/c
69 69
70 70 rename --after a single file to a nonexistent target filename
71 71
72 72 $ hg rename --after d1/a dummy
73 73 d1/a: not recording move - dummy does not exist
74 74 [1]
75 75
76 76 move a single file to an existing directory
77 77
78 78 $ hg rename d1/d11/a1 d2
79 79 $ hg status -C
80 80 A d2/a1
81 81 d1/d11/a1
82 82 R d1/d11/a1
83 83 $ hg update -C
84 84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
85 85 $ rm d2/a1
86 86
87 87 move --after a single file to an existing directory
88 88
89 89 $ mv d1/d11/a1 d2
90 90 $ hg rename --after d1/d11/a1 d2
91 91 $ hg status -C
92 92 A d2/a1
93 93 d1/d11/a1
94 94 R d1/d11/a1
95 95 $ hg update -C
96 96 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
97 97 $ rm d2/a1
98 98
99 99 rename a file using a relative path
100 100
101 101 $ (cd d1/d11; hg rename ../../d2/b e)
102 102 $ hg status -C
103 103 A d1/d11/e
104 104 d2/b
105 105 R d2/b
106 106 $ hg update -C
107 107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
108 108 $ rm d1/d11/e
109 109
110 110 rename --after a file using a relative path
111 111
112 112 $ (cd d1/d11; mv ../../d2/b e; hg rename --after ../../d2/b e)
113 113 $ hg status -C
114 114 A d1/d11/e
115 115 d2/b
116 116 R d2/b
117 117 $ hg update -C
118 118 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
119 119 $ rm d1/d11/e
120 120
121 121 rename directory d1 as d3
122 122
123 123 $ hg rename d1/ d3
124 124 moving d1/a to d3/a
125 125 moving d1/b to d3/b
126 126 moving d1/ba to d3/ba
127 127 moving d1/d11/a1 to d3/d11/a1
128 128 $ hg status -C
129 129 A d3/a
130 130 d1/a
131 131 A d3/b
132 132 d1/b
133 133 A d3/ba
134 134 d1/ba
135 135 A d3/d11/a1
136 136 d1/d11/a1
137 137 R d1/a
138 138 R d1/b
139 139 R d1/ba
140 140 R d1/d11/a1
141 141 $ hg update -C
142 142 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 143 $ rm -rf d3
144 144
145 145 rename --after directory d1 as d3
146 146
147 147 $ mv d1 d3
148 148 $ hg rename --after d1 d3
149 149 moving d1/a to d3/a
150 150 moving d1/b to d3/b
151 151 moving d1/ba to d3/ba
152 152 moving d1/d11/a1 to d3/d11/a1
153 153 $ hg status -C
154 154 A d3/a
155 155 d1/a
156 156 A d3/b
157 157 d1/b
158 158 A d3/ba
159 159 d1/ba
160 160 A d3/d11/a1
161 161 d1/d11/a1
162 162 R d1/a
163 163 R d1/b
164 164 R d1/ba
165 165 R d1/d11/a1
166 166 $ hg update -C
167 167 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
168 168 $ rm -rf d3
169 169
170 170 move a directory using a relative path
171 171
172 172 $ (cd d2; mkdir d3; hg rename ../d1/d11 d3)
173 173 moving ../d1/d11/a1 to d3/d11/a1
174 174 $ hg status -C
175 175 A d2/d3/d11/a1
176 176 d1/d11/a1
177 177 R d1/d11/a1
178 178 $ hg update -C
179 179 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
180 180 $ rm -rf d2/d3
181 181
182 182 move --after a directory using a relative path
183 183
184 184 $ (cd d2; mkdir d3; mv ../d1/d11 d3; hg rename --after ../d1/d11 d3)
185 185 moving ../d1/d11/a1 to d3/d11/a1
186 186 $ hg status -C
187 187 A d2/d3/d11/a1
188 188 d1/d11/a1
189 189 R d1/d11/a1
190 190 $ hg update -C
191 191 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
192 192 $ rm -rf d2/d3
193 193
194 194 move directory d1/d11 to an existing directory d2 (removes empty d1)
195 195
196 196 $ hg rename d1/d11/ d2
197 197 moving d1/d11/a1 to d2/d11/a1
198 198 $ hg status -C
199 199 A d2/d11/a1
200 200 d1/d11/a1
201 201 R d1/d11/a1
202 202 $ hg update -C
203 203 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 204 $ rm -rf d2/d11
205 205
206 206 move directories d1 and d2 to a new directory d3
207 207
208 208 $ mkdir d3
209 209 $ hg rename d1 d2 d3
210 210 moving d1/a to d3/d1/a
211 211 moving d1/b to d3/d1/b
212 212 moving d1/ba to d3/d1/ba
213 213 moving d1/d11/a1 to d3/d1/d11/a1
214 214 moving d2/b to d3/d2/b
215 215 $ hg status -C
216 216 A d3/d1/a
217 217 d1/a
218 218 A d3/d1/b
219 219 d1/b
220 220 A d3/d1/ba
221 221 d1/ba
222 222 A d3/d1/d11/a1
223 223 d1/d11/a1
224 224 A d3/d2/b
225 225 d2/b
226 226 R d1/a
227 227 R d1/b
228 228 R d1/ba
229 229 R d1/d11/a1
230 230 R d2/b
231 231 $ hg update -C
232 232 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
233 233 $ rm -rf d3
234 234
235 235 move --after directories d1 and d2 to a new directory d3
236 236
237 237 $ mkdir d3
238 238 $ mv d1 d2 d3
239 239 $ hg rename --after d1 d2 d3
240 240 moving d1/a to d3/d1/a
241 241 moving d1/b to d3/d1/b
242 242 moving d1/ba to d3/d1/ba
243 243 moving d1/d11/a1 to d3/d1/d11/a1
244 244 moving d2/b to d3/d2/b
245 245 $ hg status -C
246 246 A d3/d1/a
247 247 d1/a
248 248 A d3/d1/b
249 249 d1/b
250 250 A d3/d1/ba
251 251 d1/ba
252 252 A d3/d1/d11/a1
253 253 d1/d11/a1
254 254 A d3/d2/b
255 255 d2/b
256 256 R d1/a
257 257 R d1/b
258 258 R d1/ba
259 259 R d1/d11/a1
260 260 R d2/b
261 261 $ hg update -C
262 262 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
263 263 $ rm -rf d3
264 264
265 265 move everything under directory d1 to existing directory d2, do not
266 266 overwrite existing files (d2/b)
267 267
268 268 $ hg rename d1/* d2
269 269 d2/b: not overwriting - file already committed
270 270 ('hg rename --force' to replace the file by recording a rename)
271 271 moving d1/d11/a1 to d2/d11/a1
272 272 [1]
273 273 $ hg status -C
274 274 A d2/a
275 275 d1/a
276 276 A d2/ba
277 277 d1/ba
278 278 A d2/d11/a1
279 279 d1/d11/a1
280 280 R d1/a
281 281 R d1/ba
282 282 R d1/d11/a1
283 283 $ diff -u d1/b d2/b
284 284 --- d1/b * (glob)
285 285 +++ d2/b * (glob)
286 286 @@ * (glob)
287 287 -d1/b
288 288 +d2/b
289 289 [1]
290 290 $ hg update -C
291 291 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
292 292 $ rm d2/a d2/ba d2/d11/a1
293 293
294 294 attempt to move one file into a non-existent directory
295 295
296 296 $ hg rename d1/a dx/
297 297 abort: destination dx/ is not a directory
298 298 [10]
299 299 $ hg status -C
300 300 $ hg update -C
301 301 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
302 302
303 303 attempt to move potentially more than one file into a non-existent directory
304 304
305 305 $ hg rename 'glob:d1/**' dx
306 306 abort: with multiple sources, destination must be an existing directory
307 307 [10]
308 308
309 309 move every file under d1 to d2/d21
310 310
311 311 $ mkdir d2/d21
312 312 $ hg rename 'glob:d1/**' d2/d21
313 313 moving d1/a to d2/d21/a
314 314 moving d1/b to d2/d21/b
315 315 moving d1/ba to d2/d21/ba
316 316 moving d1/d11/a1 to d2/d21/a1
317 317 $ hg status -C
318 318 A d2/d21/a
319 319 d1/a
320 320 A d2/d21/a1
321 321 d1/d11/a1
322 322 A d2/d21/b
323 323 d1/b
324 324 A d2/d21/ba
325 325 d1/ba
326 326 R d1/a
327 327 R d1/b
328 328 R d1/ba
329 329 R d1/d11/a1
330 330 $ hg update -C
331 331 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
332 332 $ rm -rf d2/d21
333 333
334 334 move --after some files under d1 to d2/d21
335 335
336 336 $ mkdir d2/d21
337 337 $ mv d1/a d1/d11/a1 d2/d21
338 338 $ hg rename --after 'glob:d1/**' d2/d21
339 339 moving d1/a to d2/d21/a
340 340 d1/b: not recording move - d2/d21/b does not exist
341 341 d1/ba: not recording move - d2/d21/ba does not exist
342 342 moving d1/d11/a1 to d2/d21/a1
343 343 [1]
344 344 $ hg status -C
345 345 A d2/d21/a
346 346 d1/a
347 347 A d2/d21/a1
348 348 d1/d11/a1
349 349 R d1/a
350 350 R d1/d11/a1
351 351 $ hg update -C
352 352 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
353 353 $ rm -rf d2/d21
354 354
355 355 move every file under d1 starting with an 'a' to d2/d21 (regexp)
356 356
357 357 $ mkdir d2/d21
358 358 $ hg rename 're:d1/([^a][^/]*/)*a.*' d2/d21
359 359 moving d1/a to d2/d21/a
360 360 moving d1/d11/a1 to d2/d21/a1
361 361 $ hg status -C
362 362 A d2/d21/a
363 363 d1/a
364 364 A d2/d21/a1
365 365 d1/d11/a1
366 366 R d1/a
367 367 R d1/d11/a1
368 368 $ hg update -C
369 369 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
370 370 $ rm -rf d2/d21
371 371
372 372 attempt to overwrite an existing file
373 373
374 374 $ echo "ca" > d1/ca
375 375 $ hg rename d1/ba d1/ca
376 376 d1/ca: not overwriting - file exists
377 377 ('hg rename --after' to record the rename)
378 378 [1]
379 379 $ hg status -C
380 380 ? d1/ca
381 381 $ hg update -C
382 382 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
383 383
384 384 forced overwrite of an existing file
385 385
386 386 $ echo "ca" > d1/ca
387 387 $ hg rename --force d1/ba d1/ca
388 388 $ hg status -C
389 389 A d1/ca
390 390 d1/ba
391 391 R d1/ba
392 392 $ hg update -C
393 393 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
394 394 $ rm d1/ca
395 395
396 396 attempt to overwrite an existing broken symlink
397 397
398 398 #if symlink
399 399 $ ln -s ba d1/ca
400 400 $ hg rename --traceback d1/ba d1/ca
401 401 d1/ca: not overwriting - file exists
402 402 ('hg rename --after' to record the rename)
403 403 [1]
404 404 $ hg status -C
405 405 ? d1/ca
406 406 $ hg update -C
407 407 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
408 408 $ rm d1/ca
409 409
410 410 replace a symlink with a file
411 411
412 412 $ ln -s ba d1/ca
413 413 $ hg rename --force d1/ba d1/ca
414 414 $ hg status -C
415 415 A d1/ca
416 416 d1/ba
417 417 R d1/ba
418 418 $ hg update -C
419 419 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
420 420 $ rm d1/ca
421 421 #endif
422 422
423 423 do not copy more than one source file to the same destination file
424 424
425 425 $ mkdir d3
426 426 $ hg rename d1/* d2/* d3
427 427 moving d1/d11/a1 to d3/d11/a1
428 428 d3/b: not overwriting - d2/b collides with d1/b
429 429 [1]
430 430 $ hg status -C
431 431 A d3/a
432 432 d1/a
433 433 A d3/b
434 434 d1/b
435 435 A d3/ba
436 436 d1/ba
437 437 A d3/d11/a1
438 438 d1/d11/a1
439 439 R d1/a
440 440 R d1/b
441 441 R d1/ba
442 442 R d1/d11/a1
443 443 $ hg update -C
444 444 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
445 445 $ rm -rf d3
446 446
447 447 move a whole subtree with "hg rename ."
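(renaming '.' from inside d1 recreates the d1 directory under the destination,
hence the d3/d1/... paths below)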
448 448
449 449 $ mkdir d3
450 450 $ (cd d1; hg rename . ../d3)
451 451 moving a to ../d3/d1/a
452 452 moving b to ../d3/d1/b
453 453 moving ba to ../d3/d1/ba
454 454 moving d11/a1 to ../d3/d1/d11/a1
455 455 $ hg status -C
456 456 A d3/d1/a
457 457 d1/a
458 458 A d3/d1/b
459 459 d1/b
460 460 A d3/d1/ba
461 461 d1/ba
462 462 A d3/d1/d11/a1
463 463 d1/d11/a1
464 464 R d1/a
465 465 R d1/b
466 466 R d1/ba
467 467 R d1/d11/a1
468 468 $ hg update -C
469 469 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
470 470 $ rm -rf d3
471 471
472 472 move a whole subtree with "hg rename --after ."
473 473
474 474 $ mkdir d3
475 475 $ mv d1/* d3
476 476 $ (cd d1; hg rename --after . ../d3)
477 477 moving a to ../d3/a
478 478 moving b to ../d3/b
479 479 moving ba to ../d3/ba
480 480 moving d11/a1 to ../d3/d11/a1
481 481 $ hg status -C
482 482 A d3/a
483 483 d1/a
484 484 A d3/b
485 485 d1/b
486 486 A d3/ba
487 487 d1/ba
488 488 A d3/d11/a1
489 489 d1/d11/a1
490 490 R d1/a
491 491 R d1/b
492 492 R d1/ba
493 493 R d1/d11/a1
494 494 $ hg update -C
495 495 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
496 496 $ rm -rf d3
497 497
498 498 move the parent tree with "hg rename .."
499 499
500 500 $ (cd d1/d11; hg rename .. ../../d3)
501 501 moving ../a to ../../d3/a
502 502 moving ../b to ../../d3/b
503 503 moving ../ba to ../../d3/ba
504 504 moving a1 to ../../d3/d11/a1
505 505 $ hg status -C
506 506 A d3/a
507 507 d1/a
508 508 A d3/b
509 509 d1/b
510 510 A d3/ba
511 511 d1/ba
512 512 A d3/d11/a1
513 513 d1/d11/a1
514 514 R d1/a
515 515 R d1/b
516 516 R d1/ba
517 517 R d1/d11/a1
518 518 $ hg update -C
519 519 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
520 520 $ rm -rf d3
521 521
522 522 skip removed files
523 523
524 524 $ hg remove d1/b
525 525 $ hg rename d1 d3
526 526 moving d1/a to d3/a
527 527 moving d1/ba to d3/ba
528 528 moving d1/d11/a1 to d3/d11/a1
529 529 $ hg status -C
530 530 A d3/a
531 531 d1/a
532 532 A d3/ba
533 533 d1/ba
534 534 A d3/d11/a1
535 535 d1/d11/a1
536 536 R d1/a
537 537 R d1/b
538 538 R d1/ba
539 539 R d1/d11/a1
540 540 $ hg update -C
541 541 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
542 542 $ rm -rf d3
543 543
544 544 transitive rename
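(chained renames collapse: status below records d1/bc as copied from the original
d1/b, not from the intermediate d1/bb)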
545 545
546 546 $ hg rename d1/b d1/bb
547 547 $ hg rename d1/bb d1/bc
548 548 $ hg status -C
549 549 A d1/bc
550 550 d1/b
551 551 R d1/b
552 552 $ hg update -C
553 553 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
554 554 $ rm d1/bc
555 555
556 556 transitive rename --after
557 557
558 558 $ hg rename d1/b d1/bb
559 559 $ mv d1/bb d1/bc
560 560 $ hg rename --after d1/bb d1/bc
561 561 $ hg status -C
562 562 A d1/bc
563 563 d1/b
564 564 R d1/b
565 565 $ hg update -C
566 566 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
567 567 $ rm d1/bc
568 568
569 569 $ echo "# idempotent renames (d1/b -> d1/bb followed by d1/bb -> d1/b)"
570 570 # idempotent renames (d1/b -> d1/bb followed by d1/bb -> d1/b)
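(renaming d1/b away and back again cancels out; status below shows only the
content modification, with no copy recorded)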
571 571 $ hg rename d1/b d1/bb
572 572 $ echo "some stuff added to d1/bb" >> d1/bb
573 573 $ hg rename d1/bb d1/b
574 574 $ hg status -C
575 575 M d1/b
576 576 $ hg update -C
577 577 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
578 578
579 579 overwriting with renames (issue1959)
580 580
581 581 $ hg rename d1/a d1/c
582 582 $ hg rename d1/b d1/a
583 583 $ hg status -C
584 584 M d1/a
585 585 d1/b
586 586 A d1/c
587 587 d1/a
588 588 R d1/b
589 589 $ hg diff --git
590 590 diff --git a/d1/a b/d1/a
591 591 --- a/d1/a
592 592 +++ b/d1/a
593 593 @@ -1,1 +1,1 @@
594 594 -d1/a
595 595 +d1/b
596 596 diff --git a/d1/b b/d1/b
597 597 deleted file mode 100644
598 598 --- a/d1/b
599 599 +++ /dev/null
600 600 @@ -1,1 +0,0 @@
601 601 -d1/b
602 602 diff --git a/d1/a b/d1/c
603 603 copy from d1/a
604 604 copy to d1/c
605 605 $ hg update -C
606 606 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
607 607 $ rm d1/c # The file was marked as added, so 'hg update' action was 'forget'
608 608
609 609 check illegal path components
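(destinations that would land inside .hg/ or outside the repository root are
rejected, both with and without --after)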
610 610
611 611 $ hg rename d1/d11/a1 .hg/foo
612 612 abort: path contains illegal component: .hg/foo
613 613 [255]
614 614 $ hg status -C
615 615 $ hg rename d1/d11/a1 ../foo
616 616 abort: ../foo not under root '$TESTTMP'
617 617 [255]
618 618 $ hg status -C
619 619
620 620 $ mv d1/d11/a1 .hg/foo
621 621 $ hg rename --after d1/d11/a1 .hg/foo
622 622 abort: path contains illegal component: .hg/foo
623 623 [255]
624 624 $ hg status -C
625 625 ! d1/d11/a1
626 626 $ hg update -C
627 627 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
628 628 $ rm .hg/foo
629 629
630 630 $ hg rename d1/d11/a1 .hg
631 631 abort: path contains illegal component: .hg/a1
632 632 [255]
633 633 $ hg --config extensions.largefiles= rename d1/d11/a1 .hg
634 634 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
635 635 abort: path contains illegal component: .hg/a1
636 636 [255]
637 637 $ hg status -C
638 638 $ hg rename d1/d11/a1 ..
639 639 abort: ../a1 not under root '$TESTTMP'
640 640 [255]
641 641 $ hg --config extensions.largefiles= rename d1/d11/a1 ..
642 642 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
643 643 abort: ../a1 not under root '$TESTTMP'
644 644 [255]
645 645 $ hg status -C
646 646
647 647 $ mv d1/d11/a1 .hg
648 648 $ hg rename --after d1/d11/a1 .hg
649 649 abort: path contains illegal component: .hg/a1
650 650 [255]
651 651 $ hg status -C
652 652 ! d1/d11/a1
653 653 $ hg update -C
654 654 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
655 655 $ rm .hg/a1
656 656
657 657 $ (cd d1/d11; hg rename ../../d2/b ../../.hg/foo)
658 658 abort: path contains illegal component: .hg/foo
659 659 [255]
660 660 $ hg status -C
661 661 $ (cd d1/d11; hg rename ../../d2/b ../../../foo)
662 662 abort: ../../../foo not under root '$TESTTMP'
663 663 [255]
664 664 $ hg status -C
665 665
666 666 check that stat information such as mtime is preserved on rename. It's unclear
667 667 whether the `touch` and `stat` commands are portable, so we mimic them using
668 668 Python. Not all platforms support even one-second timestamp precision, so we
669 669 allow a rather generous fudge factor here; 1234567890 is a timestamp from 2009,
670 670 and the primary thing we care about is that it's not the machine's current
671 671 time. Hopefully it's really unlikely for a machine to have a clock so broken
672 672 that this test fails. :)
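(a minimal standalone Python sketch of the touch/stat mimicry used below; the
filename is illustrative, and the 109-second window matches the 1234567999
cutoff in the checks that follow)

    import os

    filename = "mtime-example"   # illustrative; the test itself uses mtime/f
    timestamp = 1234567890       # 2009-02-13, clearly not the current time
    fudge = 109                  # matches the 1234567999 cutoff below

    # "touch": create the file empty, then force both atime and mtime
    open(filename, "w").close()
    os.utime(filename, (timestamp, timestamp))

    # "stat": read the mtime back; a preserving operation keeps it in the window
    assert os.stat(filename).st_mtime < timestamp + fudge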
673 673
674 674 $ mkdir mtime
675 675 Create the file (as empty), then update its mtime and atime to be 1234567890.
676 676 >>> import os
677 677 >>> filename = "mtime/f"
678 678 >>> mtime = 1234567890
679 679 >>> open(filename, "w").close()
680 680 >>> os.utime(filename, (mtime, mtime))
681 681 $ hg ci -qAm 'add mtime dir'
682 682 "hg cp" does not preserve the mtime, so it should be newer than the 2009
683 683 timestamp.
684 684 $ hg cp -q mtime mtime_cp
685 685 >>> from __future__ import print_function
686 686 >>> import os
687 687 >>> filename = "mtime_cp/f"
688 688 >>> print(os.stat(filename).st_mtime < 1234567999)
689 689 False
690 690 "hg mv" preserves the mtime, so it should be ~equal to the 2009 timestamp
691 691 (modulo some fudge factor due to not every system supporting 1s-level
692 692 precision).
693 693 $ hg mv -q mtime mtime_mv
694 694 >>> from __future__ import print_function
695 695 >>> import os
696 696 >>> filename = "mtime_mv/f"
697 697 >>> print(os.stat(filename).st_mtime < 1234567999)
698 698 True