copy: use `set_tracked` instead of `normallookup` in `dirstatecopy`...
marmoute
r48529:b66ae446 default
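
This changeset is part of the broader dirstate API migration away from
state-based calls (`normallookup`, `add`, `drop`, ...) toward intent-based
ones (`set_tracked`, `set_untracked`, ...). A minimal sketch of the call-site
change in `dirstatecopy` (an illustrative paraphrase of the diff below, not
part of the diff itself):

    # before: state-based API
    repo.dirstate.normallookup(dst)
    # after: intent-based API; the intent here is that dst is tracked again
    repo.dirstate.set_tracked(dst)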
@@ -1,2289 +1,2289 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 short,
24 24 wdirrev,
25 25 )
26 26 from .pycompat import getattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies as copiesmod,
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 requirements as requirementsmod,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 hashutil,
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod('parsers')
60 60 rustrevlog = policy.importrust('revlog')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
65 65 @attr.s(slots=True, repr=False)
66 66 class status(object):
67 67 """Struct with a list of files per status.
68 68
69 69 The 'deleted', 'unknown' and 'ignored' properties are only
70 70 relevant to the working copy.
71 71 """
72 72
73 73 modified = attr.ib(default=attr.Factory(list))
74 74 added = attr.ib(default=attr.Factory(list))
75 75 removed = attr.ib(default=attr.Factory(list))
76 76 deleted = attr.ib(default=attr.Factory(list))
77 77 unknown = attr.ib(default=attr.Factory(list))
78 78 ignored = attr.ib(default=attr.Factory(list))
79 79 clean = attr.ib(default=attr.Factory(list))
80 80
81 81 def __iter__(self):
82 82 yield self.modified
83 83 yield self.added
84 84 yield self.removed
85 85 yield self.deleted
86 86 yield self.unknown
87 87 yield self.ignored
88 88 yield self.clean
89 89
90 90 def __repr__(self):
91 91 return (
92 92 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
93 93 r'unknown=%s, ignored=%s, clean=%s>'
94 94 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95 95
96 96
97 97 def itersubrepos(ctx1, ctx2):
98 98 """find subrepos in ctx1 or ctx2"""
99 99 # Create a (subpath, ctx) mapping where we prefer subpaths from
100 100 # ctx1. The subpaths from ctx2 are important when the .hgsub file
101 101 # has been modified (in ctx2) but not yet committed (in ctx1).
102 102 subpaths = dict.fromkeys(ctx2.substate, ctx2)
103 103 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
104 104
105 105 missing = set()
106 106
107 107 for subpath in ctx2.substate:
108 108 if subpath not in ctx1.substate:
109 109 del subpaths[subpath]
110 110 missing.add(subpath)
111 111
112 112 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
113 113 yield subpath, ctx.sub(subpath)
114 114
115 115 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
116 116 # status and diff will have an accurate result when it does
117 117 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
118 118 # against itself.
119 119 for subpath in missing:
120 120 yield subpath, ctx2.nullsub(subpath, ctx1)
121 121
122 122
123 123 def nochangesfound(ui, repo, excluded=None):
124 124 """Report no changes for push/pull, excluded is None or a list of
125 125 nodes excluded from the push/pull.
126 126 """
127 127 secretlist = []
128 128 if excluded:
129 129 for n in excluded:
130 130 ctx = repo[n]
131 131 if ctx.phase() >= phases.secret and not ctx.extinct():
132 132 secretlist.append(n)
133 133
134 134 if secretlist:
135 135 ui.status(
136 136 _(b"no changes found (ignored %d secret changesets)\n")
137 137 % len(secretlist)
138 138 )
139 139 else:
140 140 ui.status(_(b"no changes found\n"))
141 141
142 142
143 143 def callcatch(ui, func):
144 144 """call func() with global exception handling
145 145
146 146 return func() if no exception happens. otherwise do some error handling
147 147 and return an exit code accordingly. does not handle all exceptions.
148 148 """
149 149 coarse_exit_code = -1
150 150 detailed_exit_code = -1
151 151 try:
152 152 try:
153 153 return func()
154 154 except: # re-raises
155 155 ui.traceback()
156 156 raise
157 157 # Global exception handling, alphabetically
158 158 # Mercurial-specific first, followed by built-in and library exceptions
159 159 except error.LockHeld as inst:
160 160 detailed_exit_code = 20
161 161 if inst.errno == errno.ETIMEDOUT:
162 162 reason = _(b'timed out waiting for lock held by %r') % (
163 163 pycompat.bytestr(inst.locker)
164 164 )
165 165 else:
166 166 reason = _(b'lock held by %r') % inst.locker
167 167 ui.error(
168 168 _(b"abort: %s: %s\n")
169 169 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
170 170 )
171 171 if not inst.locker:
172 172 ui.error(_(b"(lock might be very busy)\n"))
173 173 except error.LockUnavailable as inst:
174 174 detailed_exit_code = 20
175 175 ui.error(
176 176 _(b"abort: could not lock %s: %s\n")
177 177 % (
178 178 inst.desc or stringutil.forcebytestr(inst.filename),
179 179 encoding.strtolocal(inst.strerror),
180 180 )
181 181 )
182 182 except error.RepoError as inst:
183 183 ui.error(_(b"abort: %s\n") % inst)
184 184 if inst.hint:
185 185 ui.error(_(b"(%s)\n") % inst.hint)
186 186 except error.ResponseError as inst:
187 187 ui.error(_(b"abort: %s") % inst.args[0])
188 188 msg = inst.args[1]
189 189 if isinstance(msg, type(u'')):
190 190 msg = pycompat.sysbytes(msg)
191 191 if msg is None:
192 192 ui.error(b"\n")
193 193 elif not isinstance(msg, bytes):
194 194 ui.error(b" %r\n" % (msg,))
195 195 elif not msg:
196 196 ui.error(_(b" empty string\n"))
197 197 else:
198 198 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
199 199 except error.CensoredNodeError as inst:
200 200 ui.error(_(b"abort: file censored %s\n") % inst)
201 201 except error.WdirUnsupported:
202 202 ui.error(_(b"abort: working directory revision cannot be specified\n"))
203 203 except error.Error as inst:
204 204 if inst.detailed_exit_code is not None:
205 205 detailed_exit_code = inst.detailed_exit_code
206 206 if inst.coarse_exit_code is not None:
207 207 coarse_exit_code = inst.coarse_exit_code
208 208 ui.error(inst.format())
209 209 except error.WorkerError as inst:
210 210 # Don't print a message -- the worker already should have
211 211 return inst.status_code
212 212 except ImportError as inst:
213 213 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
214 214 m = stringutil.forcebytestr(inst).split()[-1]
215 215 if m in b"mpatch bdiff".split():
216 216 ui.error(_(b"(did you forget to compile extensions?)\n"))
217 217 elif m in b"zlib".split():
218 218 ui.error(_(b"(is your Python install correct?)\n"))
219 219 except util.urlerr.httperror as inst:
220 220 detailed_exit_code = 100
221 221 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
222 222 except util.urlerr.urlerror as inst:
223 223 detailed_exit_code = 100
224 224 try: # usually it is in the form (errno, strerror)
225 225 reason = inst.reason.args[1]
226 226 except (AttributeError, IndexError):
227 227 # it might be anything, for example a string
228 228 reason = inst.reason
229 229 if isinstance(reason, pycompat.unicode):
230 230 # SSLError of Python 2.7.9 contains a unicode
231 231 reason = encoding.unitolocal(reason)
232 232 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
233 233 except (IOError, OSError) as inst:
234 234 if (
235 235 util.safehasattr(inst, b"args")
236 236 and inst.args
237 237 and inst.args[0] == errno.EPIPE
238 238 ):
239 239 pass
240 240 elif getattr(inst, "strerror", None): # common IOError or OSError
241 241 if getattr(inst, "filename", None) is not None:
242 242 ui.error(
243 243 _(b"abort: %s: '%s'\n")
244 244 % (
245 245 encoding.strtolocal(inst.strerror),
246 246 stringutil.forcebytestr(inst.filename),
247 247 )
248 248 )
249 249 else:
250 250 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
251 251 else: # suspicious IOError
252 252 raise
253 253 except MemoryError:
254 254 ui.error(_(b"abort: out of memory\n"))
255 255 except SystemExit as inst:
256 256 # Commands shouldn't sys.exit directly, but give a return code.
257 257 # Just in case, catch this and pass the exit code to the caller.
258 258 detailed_exit_code = 254
259 259 coarse_exit_code = inst.code
260 260
261 261 if ui.configbool(b'ui', b'detailed-exit-code'):
262 262 return detailed_exit_code
263 263 else:
264 264 return coarse_exit_code
265 265
266 266
267 267 def checknewlabel(repo, lbl, kind):
268 268 # Do not use the "kind" parameter in ui output.
269 269 # It makes strings difficult to translate.
270 270 if lbl in [b'tip', b'.', b'null']:
271 271 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
272 272 for c in (b':', b'\0', b'\n', b'\r'):
273 273 if c in lbl:
274 274 raise error.InputError(
275 275 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
276 276 )
277 277 try:
278 278 int(lbl)
279 279 raise error.InputError(_(b"cannot use an integer as a name"))
280 280 except ValueError:
281 281 pass
282 282 if lbl.strip() != lbl:
283 283 raise error.InputError(
284 284 _(b"leading or trailing whitespace in name %r") % lbl
285 285 )
286 286
287 287
288 288 def checkfilename(f):
289 289 '''Check that the filename f is an acceptable filename for a tracked file'''
290 290 if b'\r' in f or b'\n' in f:
291 291 raise error.InputError(
292 292 _(b"'\\n' and '\\r' disallowed in filenames: %r")
293 293 % pycompat.bytestr(f)
294 294 )
295 295
296 296
297 297 def checkportable(ui, f):
298 298 '''Check if filename f is portable and warn or abort depending on config'''
299 299 checkfilename(f)
300 300 abort, warn = checkportabilityalert(ui)
301 301 if abort or warn:
302 302 msg = util.checkwinfilename(f)
303 303 if msg:
304 304 msg = b"%s: %s" % (msg, procutil.shellquote(f))
305 305 if abort:
306 306 raise error.InputError(msg)
307 307 ui.warn(_(b"warning: %s\n") % msg)
308 308
309 309
310 310 def checkportabilityalert(ui):
311 311 """check if the user's config requests nothing, a warning, or abort for
312 312 non-portable filenames"""
313 313 val = ui.config(b'ui', b'portablefilenames')
314 314 lval = val.lower()
315 315 bval = stringutil.parsebool(val)
316 316 abort = pycompat.iswindows or lval == b'abort'
317 317 warn = bval or lval == b'warn'
318 318 if bval is None and not (warn or abort or lval == b'ignore'):
319 319 raise error.ConfigError(
320 320 _(b"ui.portablefilenames value is invalid ('%s')") % val
321 321 )
322 322 return abort, warn
323 323
324 324
325 325 class casecollisionauditor(object):
326 326 def __init__(self, ui, abort, dirstate):
327 327 self._ui = ui
328 328 self._abort = abort
329 329 allfiles = b'\0'.join(dirstate)
330 330 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
331 331 self._dirstate = dirstate
332 332 # The purpose of _newfiles is to avoid complaining about
333 333 # case collisions if someone calls this object with the
334 334 # same filename twice.
335 335 self._newfiles = set()
336 336
337 337 def __call__(self, f):
338 338 if f in self._newfiles:
339 339 return
340 340 fl = encoding.lower(f)
341 341 if fl in self._loweredfiles and f not in self._dirstate:
342 342 msg = _(b'possible case-folding collision for %s') % f
343 343 if self._abort:
344 344 raise error.Abort(msg)
345 345 self._ui.warn(_(b"warning: %s\n") % msg)
346 346 self._loweredfiles.add(fl)
347 347 self._newfiles.add(f)
348 348
349 349
350 350 def filteredhash(repo, maxrev):
351 351 """build hash of filtered revisions in the current repoview.
352 352
353 353 Multiple caches perform up-to-date validation by checking that the
354 354 tiprev and tipnode stored in the cache file match the current repository.
355 355 However, this is not sufficient for validating repoviews because the set
356 356 of revisions in the view may change without the repository tiprev and
357 357 tipnode changing.
358 358
359 359 This function hashes all the revs filtered from the view and returns
360 360 that SHA-1 digest.
361 361 """
362 362 cl = repo.changelog
363 363 if not cl.filteredrevs:
364 364 return None
365 365 key = cl._filteredrevs_hashcache.get(maxrev)
366 366 if not key:
367 367 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
368 368 if revs:
369 369 s = hashutil.sha1()
370 370 for rev in revs:
371 371 s.update(b'%d;' % rev)
372 372 key = s.digest()
373 373 cl._filteredrevs_hashcache[maxrev] = key
374 374 return key
375 375
376 376
377 377 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
378 378 """yield every hg repository under path, always recursively.
379 379 The recurse flag will only control recursion into repo working dirs"""
380 380
381 381 def errhandler(err):
382 382 if err.filename == path:
383 383 raise err
384 384
385 385 samestat = getattr(os.path, 'samestat', None)
386 386 if followsym and samestat is not None:
387 387
388 388 def adddir(dirlst, dirname):
389 389 dirstat = os.stat(dirname)
390 390 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
391 391 if not match:
392 392 dirlst.append(dirstat)
393 393 return not match
394 394
395 395 else:
396 396 followsym = False
397 397
398 398 if (seen_dirs is None) and followsym:
399 399 seen_dirs = []
400 400 adddir(seen_dirs, path)
401 401 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
402 402 dirs.sort()
403 403 if b'.hg' in dirs:
404 404 yield root # found a repository
405 405 qroot = os.path.join(root, b'.hg', b'patches')
406 406 if os.path.isdir(os.path.join(qroot, b'.hg')):
407 407 yield qroot # we have a patch queue repo here
408 408 if recurse:
409 409 # avoid recursing inside the .hg directory
410 410 dirs.remove(b'.hg')
411 411 else:
412 412 dirs[:] = [] # don't descend further
413 413 elif followsym:
414 414 newdirs = []
415 415 for d in dirs:
416 416 fname = os.path.join(root, d)
417 417 if adddir(seen_dirs, fname):
418 418 if os.path.islink(fname):
419 419 for hgname in walkrepos(fname, True, seen_dirs):
420 420 yield hgname
421 421 else:
422 422 newdirs.append(d)
423 423 dirs[:] = newdirs
424 424
425 425
426 426 def binnode(ctx):
427 427 """Return binary node id for a given basectx"""
428 428 node = ctx.node()
429 429 if node is None:
430 430 return ctx.repo().nodeconstants.wdirid
431 431 return node
432 432
433 433
434 434 def intrev(ctx):
435 435 """Return integer for a given basectx that can be used in comparison or
436 436 arithmetic operation"""
437 437 rev = ctx.rev()
438 438 if rev is None:
439 439 return wdirrev
440 440 return rev
441 441
442 442
443 443 def formatchangeid(ctx):
444 444 """Format changectx as '{rev}:{node|formatnode}', which is the default
445 445 template provided by logcmdutil.changesettemplater"""
446 446 repo = ctx.repo()
447 447 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
448 448
449 449
450 450 def formatrevnode(ui, rev, node):
451 451 """Format given revision and node depending on the current verbosity"""
452 452 if ui.debugflag:
453 453 hexfunc = hex
454 454 else:
455 455 hexfunc = short
456 456 return b'%d:%s' % (rev, hexfunc(node))
457 457
458 458
459 459 def resolvehexnodeidprefix(repo, prefix):
460 460 if prefix.startswith(b'x'):
461 461 prefix = prefix[1:]
462 462 try:
463 463 # Uses unfiltered repo because it's faster when prefix is ambiguous.
464 464 # This matches the shortesthexnodeidprefix() function below.
465 465 node = repo.unfiltered().changelog._partialmatch(prefix)
466 466 except error.AmbiguousPrefixLookupError:
467 467 revset = repo.ui.config(
468 468 b'experimental', b'revisions.disambiguatewithin'
469 469 )
470 470 if revset:
471 471 # Clear config to avoid infinite recursion
472 472 configoverrides = {
473 473 (b'experimental', b'revisions.disambiguatewithin'): None
474 474 }
475 475 with repo.ui.configoverride(configoverrides):
476 476 revs = repo.anyrevs([revset], user=True)
477 477 matches = []
478 478 for rev in revs:
479 479 node = repo.changelog.node(rev)
480 480 if hex(node).startswith(prefix):
481 481 matches.append(node)
482 482 if len(matches) == 1:
483 483 return matches[0]
484 484 raise
485 485 if node is None:
486 486 return
487 487 repo.changelog.rev(node) # make sure node isn't filtered
488 488 return node
489 489
490 490
491 491 def mayberevnum(repo, prefix):
492 492 """Checks if the given prefix may be mistaken for a revision number"""
493 493 try:
494 494 i = int(prefix)
495 495 # if we are a pure int, then starting with zero will not be
496 496 # confused with a rev; nor, obviously, will an int larger
497 497 # than the tip rev. We still need to disambiguate if
498 498 # prefix == '0', since that *is* a valid revnum.
499 499 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
500 500 return False
501 501 return True
502 502 except ValueError:
503 503 return False
504 504
505 505
506 506 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
507 507 """Find the shortest unambiguous prefix that matches hexnode.
508 508
509 509 If "cache" is not None, it must be a dictionary that can be used for
510 510 caching between calls to this method.
511 511 """
512 512 # _partialmatch() of filtered changelog could take O(len(repo)) time,
513 513 # which would be unacceptably slow, so we look for hash collisions in
514 514 # unfiltered space, which means some hashes may be slightly longer.
515 515
516 516 minlength = max(minlength, 1)
517 517
518 518 def disambiguate(prefix):
519 519 """Disambiguate against revnums."""
520 520 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
521 521 if mayberevnum(repo, prefix):
522 522 return b'x' + prefix
523 523 else:
524 524 return prefix
525 525
526 526 hexnode = hex(node)
527 527 for length in range(len(prefix), len(hexnode) + 1):
528 528 prefix = hexnode[:length]
529 529 if not mayberevnum(repo, prefix):
530 530 return prefix
531 531
532 532 cl = repo.unfiltered().changelog
533 533 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
534 534 if revset:
535 535 revs = None
536 536 if cache is not None:
537 537 revs = cache.get(b'disambiguationrevset')
538 538 if revs is None:
539 539 revs = repo.anyrevs([revset], user=True)
540 540 if cache is not None:
541 541 cache[b'disambiguationrevset'] = revs
542 542 if cl.rev(node) in revs:
543 543 hexnode = hex(node)
544 544 nodetree = None
545 545 if cache is not None:
546 546 nodetree = cache.get(b'disambiguationnodetree')
547 547 if not nodetree:
548 548 if util.safehasattr(parsers, 'nodetree'):
549 549 # The CExt is the only implementation to provide a nodetree
550 550 # class so far.
551 551 index = cl.index
552 552 if util.safehasattr(index, 'get_cindex'):
553 553 # the rust wrapper needs to give access to its internal index
554 554 index = index.get_cindex()
555 555 nodetree = parsers.nodetree(index, len(revs))
556 556 for r in revs:
557 557 nodetree.insert(r)
558 558 if cache is not None:
559 559 cache[b'disambiguationnodetree'] = nodetree
560 560 if nodetree is not None:
561 561 length = max(nodetree.shortest(node), minlength)
562 562 prefix = hexnode[:length]
563 563 return disambiguate(prefix)
564 564 for length in range(minlength, len(hexnode) + 1):
565 565 matches = []
566 566 prefix = hexnode[:length]
567 567 for rev in revs:
568 568 otherhexnode = repo[rev].hex()
569 569 if prefix == otherhexnode[:length]:
570 570 matches.append(otherhexnode)
571 571 if len(matches) == 1:
572 572 return disambiguate(prefix)
573 573
574 574 try:
575 575 return disambiguate(cl.shortest(node, minlength))
576 576 except error.LookupError:
577 577 raise error.RepoLookupError()
578 578
579 579
580 580 def isrevsymbol(repo, symbol):
581 581 """Checks if a symbol exists in the repo.
582 582
583 583 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
584 584 symbol is an ambiguous nodeid prefix.
585 585 """
586 586 try:
587 587 revsymbol(repo, symbol)
588 588 return True
589 589 except error.RepoLookupError:
590 590 return False
591 591
592 592
593 593 def revsymbol(repo, symbol):
594 594 """Returns a context given a single revision symbol (as string).
595 595
596 596 This is similar to revsingle(), but accepts only a single revision symbol,
597 597 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
598 598 not "max(public())".
599 599 """
600 600 if not isinstance(symbol, bytes):
601 601 msg = (
602 602 b"symbol (%s of type %s) was not a string, did you mean "
603 603 b"repo[symbol]?" % (symbol, type(symbol))
604 604 )
605 605 raise error.ProgrammingError(msg)
606 606 try:
607 607 if symbol in (b'.', b'tip', b'null'):
608 608 return repo[symbol]
609 609
610 610 try:
611 611 r = int(symbol)
612 612 if b'%d' % r != symbol:
613 613 raise ValueError
614 614 l = len(repo.changelog)
615 615 if r < 0:
616 616 r += l
617 617 if r < 0 or r >= l and r != wdirrev:
618 618 raise ValueError
619 619 return repo[r]
620 620 except error.FilteredIndexError:
621 621 raise
622 622 except (ValueError, OverflowError, IndexError):
623 623 pass
624 624
625 625 if len(symbol) == 2 * repo.nodeconstants.nodelen:
626 626 try:
627 627 node = bin(symbol)
628 628 rev = repo.changelog.rev(node)
629 629 return repo[rev]
630 630 except error.FilteredLookupError:
631 631 raise
632 632 except (TypeError, LookupError):
633 633 pass
634 634
635 635 # look up bookmarks through the name interface
636 636 try:
637 637 node = repo.names.singlenode(repo, symbol)
638 638 rev = repo.changelog.rev(node)
639 639 return repo[rev]
640 640 except KeyError:
641 641 pass
642 642
643 643 node = resolvehexnodeidprefix(repo, symbol)
644 644 if node is not None:
645 645 rev = repo.changelog.rev(node)
646 646 return repo[rev]
647 647
648 648 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
649 649
650 650 except error.WdirUnsupported:
651 651 return repo[None]
652 652 except (
653 653 error.FilteredIndexError,
654 654 error.FilteredLookupError,
655 655 error.FilteredRepoLookupError,
656 656 ):
657 657 raise _filterederror(repo, symbol)
658 658
659 659
660 660 def _filterederror(repo, changeid):
661 661 """build an exception to be raised about a filtered changeid
662 662
663 663 This is extracted in a function to help extensions (eg: evolve) to
664 664 experiment with various message variants."""
665 665 if repo.filtername.startswith(b'visible'):
666 666
667 667 # Check if the changeset is obsolete
668 668 unfilteredrepo = repo.unfiltered()
669 669 ctx = revsymbol(unfilteredrepo, changeid)
670 670
671 671 # If the changeset is obsolete, enrich the message with the reason
672 672 # that made this changeset not visible
673 673 if ctx.obsolete():
674 674 msg = obsutil._getfilteredreason(repo, changeid, ctx)
675 675 else:
676 676 msg = _(b"hidden revision '%s'") % changeid
677 677
678 678 hint = _(b'use --hidden to access hidden revisions')
679 679
680 680 return error.FilteredRepoLookupError(msg, hint=hint)
681 681 msg = _(b"filtered revision '%s' (not in '%s' subset)")
682 682 msg %= (changeid, repo.filtername)
683 683 return error.FilteredRepoLookupError(msg)
684 684
685 685
686 686 def revsingle(repo, revspec, default=b'.', localalias=None):
687 687 if not revspec and revspec != 0:
688 688 return repo[default]
689 689
690 690 l = revrange(repo, [revspec], localalias=localalias)
691 691 if not l:
692 692 raise error.Abort(_(b'empty revision set'))
693 693 return repo[l.last()]
694 694
695 695
696 696 def _pairspec(revspec):
697 697 tree = revsetlang.parse(revspec)
698 698 return tree and tree[0] in (
699 699 b'range',
700 700 b'rangepre',
701 701 b'rangepost',
702 702 b'rangeall',
703 703 )
704 704
705 705
706 706 def revpair(repo, revs):
707 707 if not revs:
708 708 return repo[b'.'], repo[None]
709 709
710 710 l = revrange(repo, revs)
711 711
712 712 if not l:
713 713 raise error.Abort(_(b'empty revision range'))
714 714
715 715 first = l.first()
716 716 second = l.last()
717 717
718 718 if (
719 719 first == second
720 720 and len(revs) >= 2
721 721 and not all(revrange(repo, [r]) for r in revs)
722 722 ):
723 723 raise error.Abort(_(b'empty revision on one side of range'))
724 724
725 725 # if top-level is range expression, the result must always be a pair
726 726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
727 727 return repo[first], repo[None]
728 728
729 729 return repo[first], repo[second]
730 730
731 731
732 732 def revrange(repo, specs, localalias=None):
733 733 """Execute 1 to many revsets and return the union.
734 734
735 735 This is the preferred mechanism for executing revsets using user-specified
736 736 config options, such as revset aliases.
737 737
738 738 The revsets specified by ``specs`` will be executed via a chained ``OR``
739 739 expression. If ``specs`` is empty, an empty result is returned.
740 740
741 741 ``specs`` can contain integers, in which case they are assumed to be
742 742 revision numbers.
743 743
744 744 It is assumed the revsets are already formatted. If you have arguments
745 745 that need to be expanded in the revset, call ``revsetlang.formatspec()``
746 746 and pass the result as an element of ``specs``.
747 747
748 748 Specifying a single revset is allowed.
749 749
750 750 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
751 751 integer revisions.
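
    For example (an illustrative sketch, not from this file)::

        # union of two user-supplied revsets, returned as one smartset
        revs = revrange(repo, [b'heads(default)', b'.'])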
752 752 """
753 753 allspecs = []
754 754 for spec in specs:
755 755 if isinstance(spec, int):
756 756 spec = revsetlang.formatspec(b'%d', spec)
757 757 allspecs.append(spec)
758 758 return repo.anyrevs(allspecs, user=True, localalias=localalias)
759 759
760 760
761 761 def increasingwindows(windowsize=8, sizelimit=512):
762 762 while True:
763 763 yield windowsize
764 764 if windowsize < sizelimit:
765 765 windowsize *= 2
766 766
767 767
768 768 def walkchangerevs(repo, revs, makefilematcher, prepare):
769 769 """Iterate over files and the revs in a "windowed" way.
770 770
771 771 Callers most commonly need to iterate backwards over the history
772 772 in which they are interested. Doing so has awful (quadratic-looking)
773 773 performance, so we use iterators in a "windowed" way.
774 774
775 775 We walk a window of revisions in the desired order. Within the
776 776 window, we first walk forwards to gather data, then in the desired
777 777 order (usually backwards) to display it.
778 778
779 779 This function returns an iterator yielding contexts. Before
780 780 yielding each context, the iterator will first call the prepare
781 781 function on each context in the window in forward order."""
782 782
783 783 if not revs:
784 784 return []
785 785 change = repo.__getitem__
786 786
787 787 def iterate():
788 788 it = iter(revs)
789 789 stopiteration = False
790 790 for windowsize in increasingwindows():
791 791 nrevs = []
792 792 for i in pycompat.xrange(windowsize):
793 793 rev = next(it, None)
794 794 if rev is None:
795 795 stopiteration = True
796 796 break
797 797 nrevs.append(rev)
798 798 for rev in sorted(nrevs):
799 799 ctx = change(rev)
800 800 prepare(ctx, makefilematcher(ctx))
801 801 for rev in nrevs:
802 802 yield change(rev)
803 803
804 804 if stopiteration:
805 805 break
806 806
807 807 return iterate()
808 808
809 809
810 810 def meaningfulparents(repo, ctx):
811 811 """Return list of meaningful (or all if debug) parentrevs for rev.
812 812
813 813 For merges (two non-nullrev revisions) both parents are meaningful.
814 814 Otherwise the first parent revision is considered meaningful if it
815 815 is not the preceding revision.
816 816 """
817 817 parents = ctx.parents()
818 818 if len(parents) > 1:
819 819 return parents
820 820 if repo.ui.debugflag:
821 821 return [parents[0], repo[nullrev]]
822 822 if parents[0].rev() >= intrev(ctx) - 1:
823 823 return []
824 824 return parents
825 825
826 826
827 827 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
828 828 """Return a function that produced paths for presenting to the user.
829 829
830 830 The returned function takes a repo-relative path and produces a path
831 831 that can be presented in the UI.
832 832
833 833 Depending on the value of ui.relative-paths, either a repo-relative or
834 834 cwd-relative path will be produced.
835 835
836 836 legacyrelativevalue is the value to use if ui.relative-paths=legacy
837 837
838 838 If forcerelativevalue is not None, then that value will be used regardless
839 839 of what ui.relative-paths is set to.
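
    For example (illustrative): with ui.relative-paths=yes and the current
    directory at b'foo', the repo-relative path b'foo/bar' is presented as
    b'bar'.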
840 840 """
841 841 if forcerelativevalue is not None:
842 842 relative = forcerelativevalue
843 843 else:
844 844 config = repo.ui.config(b'ui', b'relative-paths')
845 845 if config == b'legacy':
846 846 relative = legacyrelativevalue
847 847 else:
848 848 relative = stringutil.parsebool(config)
849 849 if relative is None:
850 850 raise error.ConfigError(
851 851 _(b"ui.relative-paths is not a boolean ('%s')") % config
852 852 )
853 853
854 854 if relative:
855 855 cwd = repo.getcwd()
856 856 if cwd != b'':
857 857 # this branch would work even if cwd == b'' (ie cwd = repo
858 858 # root), but its generality makes the returned function slower
859 859 pathto = repo.pathto
860 860 return lambda f: pathto(f, cwd)
861 861 if repo.ui.configbool(b'ui', b'slash'):
862 862 return lambda f: f
863 863 else:
864 864 return util.localpath
865 865
866 866
867 867 def subdiruipathfn(subpath, uipathfn):
868 868 '''Create a new uipathfn that treats the file as relative to subpath.'''
869 869 return lambda f: uipathfn(posixpath.join(subpath, f))
870 870
871 871
872 872 def anypats(pats, opts):
873 873 """Checks if any patterns, including --include and --exclude were given.
874 874
875 875 Some commands (e.g. addremove) use this condition for deciding whether to
876 876 print absolute or relative paths.
877 877 """
878 878 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
879 879
880 880
881 881 def expandpats(pats):
882 882 """Expand bare globs when running on windows.
883 883 On posix we assume it has already been done by sh.
884 884 if not util.expandglobs:
885 885 return list(pats)
886 886 ret = []
887 887 for kindpat in pats:
888 888 kind, pat = matchmod._patsplit(kindpat, None)
889 889 if kind is None:
890 890 try:
891 891 globbed = glob.glob(pat)
892 892 except re.error:
893 893 globbed = [pat]
894 894 if globbed:
895 895 ret.extend(globbed)
896 896 continue
897 897 ret.append(kindpat)
898 898 return ret
899 899
900 900
901 901 def matchandpats(
902 902 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
903 903 ):
904 904 """Return a matcher and the patterns that were used.
905 905 The matcher will warn about bad matches, unless an alternate badfn callback
906 906 is provided."""
907 907 if opts is None:
908 908 opts = {}
909 909 if not globbed and default == b'relpath':
910 910 pats = expandpats(pats or [])
911 911
912 912 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
913 913
914 914 def bad(f, msg):
915 915 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
916 916
917 917 if badfn is None:
918 918 badfn = bad
919 919
920 920 m = ctx.match(
921 921 pats,
922 922 opts.get(b'include'),
923 923 opts.get(b'exclude'),
924 924 default,
925 925 listsubrepos=opts.get(b'subrepos'),
926 926 badfn=badfn,
927 927 )
928 928
929 929 if m.always():
930 930 pats = []
931 931 return m, pats
932 932
933 933
934 934 def match(
935 935 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
936 936 ):
937 937 '''Return a matcher that will warn about bad matches.'''
938 938 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
939 939
940 940
941 941 def matchall(repo):
942 942 '''Return a matcher that will efficiently match everything.'''
943 943 return matchmod.always()
944 944
945 945
946 946 def matchfiles(repo, files, badfn=None):
947 947 '''Return a matcher that will efficiently match exactly these files.'''
948 948 return matchmod.exact(files, badfn=badfn)
949 949
950 950
951 951 def parsefollowlinespattern(repo, rev, pat, msg):
952 952 """Return a file name from `pat` pattern suitable for usage in followlines
953 953 logic.
954 954 """
955 955 if not matchmod.patkind(pat):
956 956 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
957 957 else:
958 958 ctx = repo[rev]
959 959 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
960 960 files = [f for f in ctx if m(f)]
961 961 if len(files) != 1:
962 962 raise error.ParseError(msg)
963 963 return files[0]
964 964
965 965
966 966 def getorigvfs(ui, repo):
967 967 """return a vfs suitable to save 'orig' file
968 968
969 969 return None if no special directory is configured"""
970 970 origbackuppath = ui.config(b'ui', b'origbackuppath')
971 971 if not origbackuppath:
972 972 return None
973 973 return vfs.vfs(repo.wvfs.join(origbackuppath))
974 974
975 975
976 976 def backuppath(ui, repo, filepath):
977 977 """customize where working copy backup files (.orig files) are created
978 978
979 979 Fetch user defined path from config file: [ui] origbackuppath = <path>
980 980 Fall back to default (filepath with .orig suffix) if not specified
981 981
982 982 filepath is repo-relative
983 983
984 984 Returns an absolute path
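
    For example (illustrative): with [ui] origbackuppath=.hg/origbackups, the
    backup for b'dir/file' is created at .hg/origbackups/dir/file; without
    that setting it is simply b'dir/file.orig' next to the original file.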
985 985 """
986 986 origvfs = getorigvfs(ui, repo)
987 987 if origvfs is None:
988 988 return repo.wjoin(filepath + b".orig")
989 989
990 990 origbackupdir = origvfs.dirname(filepath)
991 991 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
992 992 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
993 993
994 994 # Remove any files that conflict with the backup file's path
995 995 for f in reversed(list(pathutil.finddirs(filepath))):
996 996 if origvfs.isfileorlink(f):
997 997 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
998 998 origvfs.unlink(f)
999 999 break
1000 1000
1001 1001 origvfs.makedirs(origbackupdir)
1002 1002
1003 1003 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1004 1004 ui.note(
1005 1005 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1006 1006 )
1007 1007 origvfs.rmtree(filepath, forcibly=True)
1008 1008
1009 1009 return origvfs.join(filepath)
1010 1010
1011 1011
1012 1012 class _containsnode(object):
1013 1013 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1014 1014
1015 1015 def __init__(self, repo, revcontainer):
1016 1016 self._torev = repo.changelog.rev
1017 1017 self._revcontains = revcontainer.__contains__
1018 1018
1019 1019 def __contains__(self, node):
1020 1020 return self._revcontains(self._torev(node))
1021 1021
1022 1022
1023 1023 def cleanupnodes(
1024 1024 repo,
1025 1025 replacements,
1026 1026 operation,
1027 1027 moves=None,
1028 1028 metadata=None,
1029 1029 fixphase=False,
1030 1030 targetphase=None,
1031 1031 backup=True,
1032 1032 ):
1033 1033 """do common cleanups when old nodes are replaced by new nodes
1034 1034
1035 1035 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1036 1036 (we might also want to move working directory parent in the future)
1037 1037
1038 1038 By default, bookmark moves are calculated automatically from 'replacements',
1039 1039 but 'moves' can be used to override that. Also, 'moves' may include
1040 1040 additional bookmark moves that should not have associated obsmarkers.
1041 1041
1042 1042 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1043 1043 have replacements. operation is a string, like "rebase".
1044 1044
1045 1045 metadata is a dictionary containing metadata to be stored in obsmarkers if
1046 1046 obsolescence is enabled.
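
    For example (illustrative), {(old,): (new,)} records old being rewritten
    into new, while {(old,): ()} records old being pruned without a successor.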
1047 1047 """
1048 1048 assert fixphase or targetphase is None
1049 1049 if not replacements and not moves:
1050 1050 return
1051 1051
1052 1052 # translate mapping's other forms
1053 1053 if not util.safehasattr(replacements, b'items'):
1054 1054 replacements = {(n,): () for n in replacements}
1055 1055 else:
1056 1056 # upgrading non tuple "source" to tuple ones for BC
1057 1057 repls = {}
1058 1058 for key, value in replacements.items():
1059 1059 if not isinstance(key, tuple):
1060 1060 key = (key,)
1061 1061 repls[key] = value
1062 1062 replacements = repls
1063 1063
1064 1064 # Unfiltered repo is needed since nodes in replacements might be hidden.
1065 1065 unfi = repo.unfiltered()
1066 1066
1067 1067 # Calculate bookmark movements
1068 1068 if moves is None:
1069 1069 moves = {}
1070 1070 for oldnodes, newnodes in replacements.items():
1071 1071 for oldnode in oldnodes:
1072 1072 if oldnode in moves:
1073 1073 continue
1074 1074 if len(newnodes) > 1:
1075 1075 # usually a split, take the one with biggest rev number
1076 1076 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1077 1077 elif len(newnodes) == 0:
1078 1078 # move bookmark backwards
1079 1079 allreplaced = []
1080 1080 for rep in replacements:
1081 1081 allreplaced.extend(rep)
1082 1082 roots = list(
1083 1083 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1084 1084 )
1085 1085 if roots:
1086 1086 newnode = roots[0].node()
1087 1087 else:
1088 1088 newnode = repo.nullid
1089 1089 else:
1090 1090 newnode = newnodes[0]
1091 1091 moves[oldnode] = newnode
1092 1092
1093 1093 allnewnodes = [n for ns in replacements.values() for n in ns]
1094 1094 toretract = {}
1095 1095 toadvance = {}
1096 1096 if fixphase:
1097 1097 precursors = {}
1098 1098 for oldnodes, newnodes in replacements.items():
1099 1099 for oldnode in oldnodes:
1100 1100 for newnode in newnodes:
1101 1101 precursors.setdefault(newnode, []).append(oldnode)
1102 1102
1103 1103 allnewnodes.sort(key=lambda n: unfi[n].rev())
1104 1104 newphases = {}
1105 1105
1106 1106 def phase(ctx):
1107 1107 return newphases.get(ctx.node(), ctx.phase())
1108 1108
1109 1109 for newnode in allnewnodes:
1110 1110 ctx = unfi[newnode]
1111 1111 parentphase = max(phase(p) for p in ctx.parents())
1112 1112 if targetphase is None:
1113 1113 oldphase = max(
1114 1114 unfi[oldnode].phase() for oldnode in precursors[newnode]
1115 1115 )
1116 1116 newphase = max(oldphase, parentphase)
1117 1117 else:
1118 1118 newphase = max(targetphase, parentphase)
1119 1119 newphases[newnode] = newphase
1120 1120 if newphase > ctx.phase():
1121 1121 toretract.setdefault(newphase, []).append(newnode)
1122 1122 elif newphase < ctx.phase():
1123 1123 toadvance.setdefault(newphase, []).append(newnode)
1124 1124
1125 1125 with repo.transaction(b'cleanup') as tr:
1126 1126 # Move bookmarks
1127 1127 bmarks = repo._bookmarks
1128 1128 bmarkchanges = []
1129 1129 for oldnode, newnode in moves.items():
1130 1130 oldbmarks = repo.nodebookmarks(oldnode)
1131 1131 if not oldbmarks:
1132 1132 continue
1133 1133 from . import bookmarks # avoid import cycle
1134 1134
1135 1135 repo.ui.debug(
1136 1136 b'moving bookmarks %r from %s to %s\n'
1137 1137 % (
1138 1138 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1139 1139 hex(oldnode),
1140 1140 hex(newnode),
1141 1141 )
1142 1142 )
1143 1143 # Delete divergent bookmarks being parents of related newnodes
1144 1144 deleterevs = repo.revs(
1145 1145 b'parents(roots(%ln & (::%n))) - parents(%n)',
1146 1146 allnewnodes,
1147 1147 newnode,
1148 1148 oldnode,
1149 1149 )
1150 1150 deletenodes = _containsnode(repo, deleterevs)
1151 1151 for name in oldbmarks:
1152 1152 bmarkchanges.append((name, newnode))
1153 1153 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1154 1154 bmarkchanges.append((b, None))
1155 1155
1156 1156 if bmarkchanges:
1157 1157 bmarks.applychanges(repo, tr, bmarkchanges)
1158 1158
1159 1159 for phase, nodes in toretract.items():
1160 1160 phases.retractboundary(repo, tr, phase, nodes)
1161 1161 for phase, nodes in toadvance.items():
1162 1162 phases.advanceboundary(repo, tr, phase, nodes)
1163 1163
1164 1164 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1165 1165 # Obsolete or strip nodes
1166 1166 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1167 1167 # If a node is already obsoleted, and we want to obsolete it
1168 1168 # without a successor, skip that obsolete request since it's
1169 1169 # unnecessary. That's the "if s or not isobs(n)" check below.
1170 1170 # Also sort the nodes in topological order; that might be useful for
1171 1171 # some obsstore logic.
1172 1172 # NOTE: the sorting might belong to createmarkers.
1173 1173 torev = unfi.changelog.rev
1174 1174 sortfunc = lambda ns: torev(ns[0][0])
1175 1175 rels = []
1176 1176 for ns, s in sorted(replacements.items(), key=sortfunc):
1177 1177 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1178 1178 rels.append(rel)
1179 1179 if rels:
1180 1180 obsolete.createmarkers(
1181 1181 repo, rels, operation=operation, metadata=metadata
1182 1182 )
1183 1183 elif phases.supportinternal(repo) and mayusearchived:
1184 1184 # this assumes we do not have "unstable" nodes above the cleaned ones
1185 1185 allreplaced = set()
1186 1186 for ns in replacements.keys():
1187 1187 allreplaced.update(ns)
1188 1188 if backup:
1189 1189 from . import repair # avoid import cycle
1190 1190
1191 1191 node = min(allreplaced, key=repo.changelog.rev)
1192 1192 repair.backupbundle(
1193 1193 repo, allreplaced, allreplaced, node, operation
1194 1194 )
1195 1195 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1196 1196 else:
1197 1197 from . import repair # avoid import cycle
1198 1198
1199 1199 tostrip = list(n for ns in replacements for n in ns)
1200 1200 if tostrip:
1201 1201 repair.delayedstrip(
1202 1202 repo.ui, repo, tostrip, operation, backup=backup
1203 1203 )
1204 1204
1205 1205
1206 1206 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1207 1207 if opts is None:
1208 1208 opts = {}
1209 1209 m = matcher
1210 1210 dry_run = opts.get(b'dry_run')
1211 1211 try:
1212 1212 similarity = float(opts.get(b'similarity') or 0)
1213 1213 except ValueError:
1214 1214 raise error.Abort(_(b'similarity must be a number'))
1215 1215 if similarity < 0 or similarity > 100:
1216 1216 raise error.Abort(_(b'similarity must be between 0 and 100'))
1217 1217 similarity /= 100.0
1218 1218
1219 1219 ret = 0
1220 1220
1221 1221 wctx = repo[None]
1222 1222 for subpath in sorted(wctx.substate):
1223 1223 submatch = matchmod.subdirmatcher(subpath, m)
1224 1224 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1225 1225 sub = wctx.sub(subpath)
1226 1226 subprefix = repo.wvfs.reljoin(prefix, subpath)
1227 1227 subuipathfn = subdiruipathfn(subpath, uipathfn)
1228 1228 try:
1229 1229 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1230 1230 ret = 1
1231 1231 except error.LookupError:
1232 1232 repo.ui.status(
1233 1233 _(b"skipping missing subrepository: %s\n")
1234 1234 % uipathfn(subpath)
1235 1235 )
1236 1236
1237 1237 rejected = []
1238 1238
1239 1239 def badfn(f, msg):
1240 1240 if f in m.files():
1241 1241 m.bad(f, msg)
1242 1242 rejected.append(f)
1243 1243
1244 1244 badmatch = matchmod.badmatch(m, badfn)
1245 1245 added, unknown, deleted, removed, forgotten = _interestingfiles(
1246 1246 repo, badmatch
1247 1247 )
1248 1248
1249 1249 unknownset = set(unknown + forgotten)
1250 1250 toprint = unknownset.copy()
1251 1251 toprint.update(deleted)
1252 1252 for abs in sorted(toprint):
1253 1253 if repo.ui.verbose or not m.exact(abs):
1254 1254 if abs in unknownset:
1255 1255 status = _(b'adding %s\n') % uipathfn(abs)
1256 1256 label = b'ui.addremove.added'
1257 1257 else:
1258 1258 status = _(b'removing %s\n') % uipathfn(abs)
1259 1259 label = b'ui.addremove.removed'
1260 1260 repo.ui.status(status, label=label)
1261 1261
1262 1262 renames = _findrenames(
1263 1263 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1264 1264 )
1265 1265
1266 1266 if not dry_run:
1267 1267 _markchanges(repo, unknown + forgotten, deleted, renames)
1268 1268
1269 1269 for f in rejected:
1270 1270 if f in m.files():
1271 1271 return 1
1272 1272 return ret
1273 1273
1274 1274
1275 1275 def marktouched(repo, files, similarity=0.0):
1276 1276 """Assert that files have somehow been operated upon. files are relative to
1277 1277 the repo root."""
1278 1278 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1279 1279 rejected = []
1280 1280
1281 1281 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1282 1282
1283 1283 if repo.ui.verbose:
1284 1284 unknownset = set(unknown + forgotten)
1285 1285 toprint = unknownset.copy()
1286 1286 toprint.update(deleted)
1287 1287 for abs in sorted(toprint):
1288 1288 if abs in unknownset:
1289 1289 status = _(b'adding %s\n') % abs
1290 1290 else:
1291 1291 status = _(b'removing %s\n') % abs
1292 1292 repo.ui.status(status)
1293 1293
1294 1294 # TODO: We should probably have the caller pass in uipathfn and apply it to
1295 1295 # the messages above too. legacyrelativevalue=True is consistent with how
1296 1296 # it used to work.
1297 1297 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1298 1298 renames = _findrenames(
1299 1299 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1300 1300 )
1301 1301
1302 1302 _markchanges(repo, unknown + forgotten, deleted, renames)
1303 1303
1304 1304 for f in rejected:
1305 1305 if f in m.files():
1306 1306 return 1
1307 1307 return 0
1308 1308
1309 1309
1310 1310 def _interestingfiles(repo, matcher):
1311 1311 """Walk dirstate with matcher, looking for files that addremove would care
1312 1312 about.
1313 1313
1314 1314 This is different from dirstate.status because it doesn't care about
1315 1315 whether files are modified or clean."""
1316 1316 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1317 1317 audit_path = pathutil.pathauditor(repo.root, cached=True)
1318 1318
1319 1319 ctx = repo[None]
1320 1320 dirstate = repo.dirstate
1321 1321 matcher = repo.narrowmatch(matcher, includeexact=True)
1322 1322 walkresults = dirstate.walk(
1323 1323 matcher,
1324 1324 subrepos=sorted(ctx.substate),
1325 1325 unknown=True,
1326 1326 ignored=False,
1327 1327 full=False,
1328 1328 )
1329 1329 for abs, st in pycompat.iteritems(walkresults):
1330 1330 dstate = dirstate[abs]
1331 1331 if dstate == b'?' and audit_path.check(abs):
1332 1332 unknown.append(abs)
1333 1333 elif dstate != b'r' and not st:
1334 1334 deleted.append(abs)
1335 1335 elif dstate == b'r' and st:
1336 1336 forgotten.append(abs)
1337 1337 # for finding renames
1338 1338 elif dstate == b'r' and not st:
1339 1339 removed.append(abs)
1340 1340 elif dstate == b'a':
1341 1341 added.append(abs)
1342 1342
1343 1343 return added, unknown, deleted, removed, forgotten
1344 1344
1345 1345
1346 1346 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1347 1347 '''Find renames from removed files to added ones.'''
1348 1348 renames = {}
1349 1349 if similarity > 0:
1350 1350 for old, new, score in similar.findrenames(
1351 1351 repo, added, removed, similarity
1352 1352 ):
1353 1353 if (
1354 1354 repo.ui.verbose
1355 1355 or not matcher.exact(old)
1356 1356 or not matcher.exact(new)
1357 1357 ):
1358 1358 repo.ui.status(
1359 1359 _(
1360 1360 b'recording removal of %s as rename to %s '
1361 1361 b'(%d%% similar)\n'
1362 1362 )
1363 1363 % (uipathfn(old), uipathfn(new), score * 100)
1364 1364 )
1365 1365 renames[new] = old
1366 1366 return renames
1367 1367
1368 1368
1369 1369 def _markchanges(repo, unknown, deleted, renames):
1370 1370 """Marks the files in unknown as added, the files in deleted as removed,
1371 1371 and the files in renames as copied."""
1372 1372 wctx = repo[None]
1373 1373 with repo.wlock():
1374 1374 wctx.forget(deleted)
1375 1375 wctx.add(unknown)
1376 1376 for new, old in pycompat.iteritems(renames):
1377 1377 wctx.copy(old, new)
1378 1378
1379 1379
1380 1380 def getrenamedfn(repo, endrev=None):
1381 1381 if copiesmod.usechangesetcentricalgo(repo):
1382 1382
1383 1383 def getrenamed(fn, rev):
1384 1384 ctx = repo[rev]
1385 1385 p1copies = ctx.p1copies()
1386 1386 if fn in p1copies:
1387 1387 return p1copies[fn]
1388 1388 p2copies = ctx.p2copies()
1389 1389 if fn in p2copies:
1390 1390 return p2copies[fn]
1391 1391 return None
1392 1392
1393 1393 return getrenamed
1394 1394
1395 1395 rcache = {}
1396 1396 if endrev is None:
1397 1397 endrev = len(repo)
1398 1398
1399 1399 def getrenamed(fn, rev):
1400 1400 """looks up all renames for a file (up to endrev) the first
1401 1401 time the file is given. It indexes on the changerev and only
1402 1402 parses the manifest if linkrev != changerev.
1403 1403 Returns rename info for fn at changerev rev."""
1404 1404 if fn not in rcache:
1405 1405 rcache[fn] = {}
1406 1406 fl = repo.file(fn)
1407 1407 for i in fl:
1408 1408 lr = fl.linkrev(i)
1409 1409 renamed = fl.renamed(fl.node(i))
1410 1410 rcache[fn][lr] = renamed and renamed[0]
1411 1411 if lr >= endrev:
1412 1412 break
1413 1413 if rev in rcache[fn]:
1414 1414 return rcache[fn][rev]
1415 1415
1416 1416 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1417 1417 # filectx logic.
1418 1418 try:
1419 1419 return repo[rev][fn].copysource()
1420 1420 except error.LookupError:
1421 1421 return None
1422 1422
1423 1423 return getrenamed
1424 1424
1425 1425
1426 1426 def getcopiesfn(repo, endrev=None):
1427 1427 if copiesmod.usechangesetcentricalgo(repo):
1428 1428
1429 1429 def copiesfn(ctx):
1430 1430 if ctx.p2copies():
1431 1431 allcopies = ctx.p1copies().copy()
1432 1432 # There should be no overlap
1433 1433 allcopies.update(ctx.p2copies())
1434 1434 return sorted(allcopies.items())
1435 1435 else:
1436 1436 return sorted(ctx.p1copies().items())
1437 1437
1438 1438 else:
1439 1439 getrenamed = getrenamedfn(repo, endrev)
1440 1440
1441 1441 def copiesfn(ctx):
1442 1442 copies = []
1443 1443 for fn in ctx.files():
1444 1444 rename = getrenamed(fn, ctx.rev())
1445 1445 if rename:
1446 1446 copies.append((fn, rename))
1447 1447 return copies
1448 1448
1449 1449 return copiesfn
1450 1450
1451 1451
1452 1452 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1453 1453 """Update the dirstate to reflect the intent of copying src to dst. For
1454 1454 different reasons it might not end with dst being marked as copied from src.
1455 1455 """
1456 1456 origsrc = repo.dirstate.copied(src) or src
1457 1457 if dst == origsrc: # copying back a copy?
1458 1458 if repo.dirstate[dst] not in b'mn' and not dryrun:
1459 repo.dirstate.normallookup(dst)
1459 repo.dirstate.set_tracked(dst)
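    # set_tracked() expresses the intent directly: dst must be tracked
    # again; it replaces the older state-based normallookup() call here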
1460 1460 else:
1461 1461 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1462 1462 if not ui.quiet:
1463 1463 ui.warn(
1464 1464 _(
1465 1465 b"%s has not been committed yet, so no copy "
1466 1466 b"data will be stored for %s.\n"
1467 1467 )
1468 1468 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1469 1469 )
1470 1470 if repo.dirstate[dst] in b'?r' and not dryrun:
1471 1471 wctx.add([dst])
1472 1472 elif not dryrun:
1473 1473 wctx.copy(origsrc, dst)
1474 1474
1475 1475
1476 1476 def movedirstate(repo, newctx, match=None):
1477 1477 """Move the dirstate to newctx and adjust it as necessary.
1478 1478
1479 1479 A matcher can be provided as an optimization. It is probably a bug to pass
1480 1480 a matcher that doesn't match all the differences between the parent of the
1481 1481 working copy and newctx.
1482 1482 """
1483 1483 oldctx = repo[b'.']
1484 1484 ds = repo.dirstate
1485 1485 copies = dict(ds.copies())
1486 1486 ds.setparents(newctx.node(), repo.nullid)
1487 1487 s = newctx.status(oldctx, match=match)
1488 1488
1489 1489 for f in s.modified:
1490 1490 ds.update_file_p1(f, p1_tracked=True)
1491 1491
1492 1492 for f in s.added:
1493 1493 ds.update_file_p1(f, p1_tracked=False)
1494 1494
1495 1495 for f in s.removed:
1496 1496 ds.update_file_p1(f, p1_tracked=True)
1497 1497
1498 1498 # Merge old parent and old working dir copies
1499 1499 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1500 1500 oldcopies.update(copies)
1501 1501 copies = {
1502 1502 dst: oldcopies.get(src, src)
1503 1503 for dst, src in pycompat.iteritems(oldcopies)
1504 1504 }
1505 1505 # Adjust the dirstate copies
1506 1506 for dst, src in pycompat.iteritems(copies):
1507 1507 if src not in newctx or dst in newctx or ds[dst] != b'a':
1508 1508 src = None
1509 1509 ds.copy(src, dst)
1510 1510 repo._quick_access_changeid_invalidate()
1511 1511
1512 1512
1513 1513 def filterrequirements(requirements):
1514 1514 """filters the requirements into two sets:
1515 1515
1516 1516 wcreq: requirements which should be written in .hg/requires
1517 1517 storereq: which should be written in .hg/store/requires
1518 1518
1519 1519 Returns (wcreq, storereq)
1520 1520 """
1521 1521 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1522 1522 wc, store = set(), set()
1523 1523 for r in requirements:
1524 1524 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1525 1525 wc.add(r)
1526 1526 else:
1527 1527 store.add(r)
1528 1528 return wc, store
1529 1529 return requirements, None
1530 1530
1531 1531
1532 1532 def istreemanifest(repo):
1533 1533 """returns whether the repository is using treemanifest or not"""
1534 1534 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1535 1535
1536 1536
1537 1537 def writereporequirements(repo, requirements=None):
1538 1538 """writes requirements for the repo
1539 1539
1540 1540 Requirements are written to .hg/requires and .hg/store/requires based
1541 1541 on whether share-safe mode is enabled and which requirements are wdir
1542 1542 requirements and which are store requirements
1543 1543 """
1544 1544 if requirements:
1545 1545 repo.requirements = requirements
1546 1546 wcreq, storereq = filterrequirements(repo.requirements)
1547 1547 if wcreq is not None:
1548 1548 writerequires(repo.vfs, wcreq)
1549 1549 if storereq is not None:
1550 1550 writerequires(repo.svfs, storereq)
1551 1551 elif repo.ui.configbool(b'format', b'usestore'):
1552 1552 # only remove store requires if we are using store
1553 1553 repo.svfs.tryunlink(b'requires')
1554 1554
1555 1555
1556 1556 def writerequires(opener, requirements):
1557 1557 with opener(b'requires', b'w', atomictemp=True) as fp:
1558 1558 for r in sorted(requirements):
1559 1559 fp.write(b"%s\n" % r)
1560 1560
1561 1561
1562 1562 class filecachesubentry(object):
1563 1563 def __init__(self, path, stat):
1564 1564 self.path = path
1565 1565 self.cachestat = None
1566 1566 self._cacheable = None
1567 1567
1568 1568 if stat:
1569 1569 self.cachestat = filecachesubentry.stat(self.path)
1570 1570
1571 1571 if self.cachestat:
1572 1572 self._cacheable = self.cachestat.cacheable()
1573 1573 else:
1574 1574 # None means we don't know yet
1575 1575 self._cacheable = None
1576 1576
1577 1577 def refresh(self):
1578 1578 if self.cacheable():
1579 1579 self.cachestat = filecachesubentry.stat(self.path)
1580 1580
1581 1581 def cacheable(self):
1582 1582 if self._cacheable is not None:
1583 1583 return self._cacheable
1584 1584
1585 1585 # we don't know yet, assume it is for now
1586 1586 return True
1587 1587
1588 1588 def changed(self):
1589 1589 # no point in going further if we can't cache it
1590 1590 if not self.cacheable():
1591 1591 return True
1592 1592
1593 1593 newstat = filecachesubentry.stat(self.path)
1594 1594
1595 1595 # we may not know if it's cacheable yet, check again now
1596 1596 if newstat and self._cacheable is None:
1597 1597 self._cacheable = newstat.cacheable()
1598 1598
1599 1599 # check again
1600 1600 if not self._cacheable:
1601 1601 return True
1602 1602
1603 1603 if self.cachestat != newstat:
1604 1604 self.cachestat = newstat
1605 1605 return True
1606 1606 else:
1607 1607 return False
1608 1608
1609 1609 @staticmethod
1610 1610 def stat(path):
1611 1611 try:
1612 1612 return util.cachestat(path)
1613 1613 except OSError as e:
1614 1614 if e.errno != errno.ENOENT:
1615 1615 raise
1616 1616
1617 1617
1618 1618 class filecacheentry(object):
1619 1619 def __init__(self, paths, stat=True):
1620 1620 self._entries = []
1621 1621 for path in paths:
1622 1622 self._entries.append(filecachesubentry(path, stat))
1623 1623
1624 1624 def changed(self):
1625 1625 '''true if any entry has changed'''
1626 1626 for entry in self._entries:
1627 1627 if entry.changed():
1628 1628 return True
1629 1629 return False
1630 1630
1631 1631 def refresh(self):
1632 1632 for entry in self._entries:
1633 1633 entry.refresh()
1634 1634
1635 1635
1636 1636 class filecache(object):
1637 1637 """A property like decorator that tracks files under .hg/ for updates.
1638 1638
1639 1639 On first access, the files defined as arguments are stat()ed and the
1640 1640 results cached. The decorated function is called. The results are stashed
1641 1641 away in a ``_filecache`` dict on the object whose method is decorated.
1642 1642
1643 1643 On subsequent access, the cached result is returned directly from the
1644 1644 instance dictionary, without invoking the descriptor again.
1645 1645
1646 1646 On external property set/delete operations, the caller must update the
1647 1647 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1648 1648 instead of directly setting <attr>.
1649 1649
1650 1650 When using the property API, the cached data is always used if available.
1651 1651 No stat() is performed to check if the file has changed.
1652 1652
1653 1653 Others can muck about with the state of the ``_filecache`` dict, e.g. they
1654 1654 can populate an entry before the property's getter is called. In this case,
1655 1655 entries in ``_filecache`` will be used during property operations,
1656 1656 if available. If the underlying file changes, it is up to external callers
1657 1657 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1658 1658 method result as well as possibly calling ``del obj._filecache[attr]`` to
1659 1659 remove the ``filecacheentry``.
1660 1660 """
1661 1661
1662 1662 def __init__(self, *paths):
1663 1663 self.paths = paths
1664 1664
1665 1665 def join(self, obj, fname):
1666 1666 """Used to compute the runtime path of a cached file.
1667 1667
1668 1668 Users should subclass filecache and provide their own version of this
1669 1669 function to call the appropriate join function on 'obj' (an instance
1670 1670 of the class whose member function was decorated).
1671 1671 """
1672 1672 raise NotImplementedError
1673 1673
1674 1674 def __call__(self, func):
1675 1675 self.func = func
1676 1676 self.sname = func.__name__
1677 1677 self.name = pycompat.sysbytes(self.sname)
1678 1678 return self
1679 1679
1680 1680 def __get__(self, obj, type=None):
1681 1681 # if accessed on the class, return the descriptor itself.
1682 1682 if obj is None:
1683 1683 return self
1684 1684
1685 1685 assert self.sname not in obj.__dict__
1686 1686
1687 1687 entry = obj._filecache.get(self.name)
1688 1688
1689 1689 if entry:
1690 1690 if entry.changed():
1691 1691 entry.obj = self.func(obj)
1692 1692 else:
1693 1693 paths = [self.join(obj, path) for path in self.paths]
1694 1694
1695 1695 # We stat -before- creating the object so our cache doesn't lie if
1696 1696 # a writer modified the file between the time we read and stat it
1697 1697 entry = filecacheentry(paths, True)
1698 1698 entry.obj = self.func(obj)
1699 1699
1700 1700 obj._filecache[self.name] = entry
1701 1701
1702 1702 obj.__dict__[self.sname] = entry.obj
1703 1703 return entry.obj
1704 1704
1705 1705 # don't implement __set__(), which would make __dict__ lookup as slow as
1706 1706 # function call.
1707 1707
1708 1708 def set(self, obj, value):
1709 1709 if self.name not in obj._filecache:
1710 1710 # we add an entry for the missing value because X in __dict__
1711 1711 # implies X in _filecache
1712 1712 paths = [self.join(obj, path) for path in self.paths]
1713 1713 ce = filecacheentry(paths, False)
1714 1714 obj._filecache[self.name] = ce
1715 1715 else:
1716 1716 ce = obj._filecache[self.name]
1717 1717
1718 1718 ce.obj = value # update cached copy
1719 1719 obj.__dict__[self.sname] = value # update copy returned by obj.x
1720 1720
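# Illustrative sketch, not part of the original module: a minimal consumer
# of ``filecache``. The ``_sketchrepofilecache`` and ``_sketchrepo`` names
# are hypothetical; real users such as localrepository follow the same
# protocol: provide join(), keep a ``_filecache`` dict, and update values
# through ``__class__.<attr>.set()`` instead of plain attribute assignment.
class _sketchrepofilecache(filecache):
    def join(self, obj, fname):
        # resolve the tracked file relative to the object's vfs (.hg/)
        return obj.vfs.join(fname)


class _sketchrepo(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}  # required by the filecache protocol

    @_sketchrepofilecache(b'bookmarks')
    def bookmarks(self):
        # recomputed only when the tracked file changes on disk
        return self.vfs.tryread(b'bookmarks').splitlines()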
1721 1721
1722 1722 def extdatasource(repo, source):
1723 1723 """Gather a map of rev -> value dict from the specified source
1724 1724
1725 1725 A source spec is treated as a URL, with a special case shell: type
1726 1726 for parsing the output from a shell command.
1727 1727
1728 1728 The data is parsed as a series of newline-separated records where
1729 1729 each record is a revision specifier optionally followed by a space
1730 1730 and a freeform string value. If the revision is known locally, it
1731 1731 is converted to a rev, otherwise the record is skipped.
1732 1732
1733 1733 Note that both key and value are treated as UTF-8 and converted to
1734 1734 the local encoding. This allows uniformity between local and
1735 1735 remote data sources.
1736 1736 """
1737 1737
1738 1738 spec = repo.ui.config(b"extdata", source)
1739 1739 if not spec:
1740 1740 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1741 1741
1742 1742 data = {}
1743 1743 src = proc = None
1744 1744 try:
1745 1745 if spec.startswith(b"shell:"):
1746 1746 # external commands should be run relative to the repo root
1747 1747 cmd = spec[6:]
1748 1748 proc = subprocess.Popen(
1749 1749 procutil.tonativestr(cmd),
1750 1750 shell=True,
1751 1751 bufsize=-1,
1752 1752 close_fds=procutil.closefds,
1753 1753 stdout=subprocess.PIPE,
1754 1754 cwd=procutil.tonativestr(repo.root),
1755 1755 )
1756 1756 src = proc.stdout
1757 1757 else:
1758 1758 # treat as a URL or file
1759 1759 src = url.open(repo.ui, spec)
1760 1760 for l in src:
1761 1761 if b" " in l:
1762 1762 k, v = l.strip().split(b" ", 1)
1763 1763 else:
1764 1764 k, v = l.strip(), b""
1765 1765
1766 1766 k = encoding.tolocal(k)
1767 1767 try:
1768 1768 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1769 1769 except (error.LookupError, error.RepoLookupError, error.InputError):
1770 1770 pass # we ignore data for nodes that don't exist locally
1771 1771 finally:
1772 1772 if proc:
1773 1773 try:
1774 1774 proc.communicate()
1775 1775 except ValueError:
1776 1776 # This happens if we started iterating src and then
1777 1777 # get a parse error on a line. It should be safe to ignore.
1778 1778 pass
1779 1779 if src:
1780 1780 src.close()
1781 1781 if proc and proc.returncode != 0:
1782 1782 raise error.Abort(
1783 1783 _(b"extdata command '%s' failed: %s")
1784 1784 % (cmd, procutil.explainexit(proc.returncode))
1785 1785 )
1786 1786
1787 1787 return data
1788 1788
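# Illustrative sketch, not part of the original module: consuming an extdata
# source. The ``bugzilla`` source name is hypothetical and assumes an hgrc
# entry such as:
#
#   [extdata]
#   bugzilla = shell:cat .hg/bugzilla-map
#
# where each output line is ``<revspec> <value>``.
def _sketchlookupbug(repo, ctx):
    data = extdatasource(repo, b'bugzilla')
    return data.get(ctx.rev(), b'')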
1789 1789
1790 1790 class progress(object):
1791 1791 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1792 1792 self.ui = ui
1793 1793 self.pos = 0
1794 1794 self.topic = topic
1795 1795 self.unit = unit
1796 1796 self.total = total
1797 1797 self.debug = ui.configbool(b'progress', b'debug')
1798 1798 self._updatebar = updatebar
1799 1799
1800 1800 def __enter__(self):
1801 1801 return self
1802 1802
1803 1803 def __exit__(self, exc_type, exc_value, exc_tb):
1804 1804 self.complete()
1805 1805
1806 1806 def update(self, pos, item=b"", total=None):
1807 1807 assert pos is not None
1808 1808 if total:
1809 1809 self.total = total
1810 1810 self.pos = pos
1811 1811 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1812 1812 if self.debug:
1813 1813 self._printdebug(item)
1814 1814
1815 1815 def increment(self, step=1, item=b"", total=None):
1816 1816 self.update(self.pos + step, item, total)
1817 1817
1818 1818 def complete(self):
1819 1819 self.pos = None
1820 1820 self.unit = b""
1821 1821 self.total = None
1822 1822 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1823 1823
1824 1824 def _printdebug(self, item):
1825 1825 unit = b''
1826 1826 if self.unit:
1827 1827 unit = b' ' + self.unit
1828 1828 if item:
1829 1829 item = b' ' + item
1830 1830
1831 1831 if self.total:
1832 1832 pct = 100.0 * self.pos / self.total
1833 1833 self.ui.debug(
1834 1834 b'%s:%s %d/%d%s (%4.2f%%)\n'
1835 1835 % (self.topic, item, self.pos, self.total, unit, pct)
1836 1836 )
1837 1837 else:
1838 1838 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1839 1839
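# Illustrative sketch, not part of the original module: typical use of the
# ``progress`` helper. Callers normally obtain instances through
# ``ui.makeprogress()`` rather than constructing them directly; the copy
# loop below is hypothetical.
def _sketchcopyfiles(ui, files):
    with ui.makeprogress(b'copying', unit=b'files', total=len(files)) as p:
        for f in files:
            p.increment(item=f)  # advances the bar; honors progress.debug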
1840 1840
1841 1841 def gdinitconfig(ui):
1842 1842 """helper function to know if a repo should be created as general delta"""
1843 1843 # experimental config: format.generaldelta
1844 1844 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1845 1845 b'format', b'usegeneraldelta'
1846 1846 )
1847 1847
1848 1848
1849 1849 def gddeltaconfig(ui):
1850 1850 """helper function to know if incoming delta should be optimised"""
1851 1851 # experimental config: format.generaldelta
1852 1852 return ui.configbool(b'format', b'generaldelta')
1853 1853
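# Illustrative sketch, not part of the original module: the hgrc knobs the
# two helpers above read. ``usegeneraldelta`` controls how new repositories
# are created; ``generaldelta`` additionally asks for incoming deltas to be
# recomputed on the fly.
#
#   [format]
#   usegeneraldelta = True
#   generaldelta = False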
1854 1854
1855 1855 class simplekeyvaluefile(object):
1856 1856 """A simple file with key=value lines
1857 1857
1858 1858 Keys must be alphanumeric and start with a letter; values must not
1859 1859 contain '\n' characters"""
1860 1860
1861 1861 firstlinekey = b'__firstline'
1862 1862
1863 1863 def __init__(self, vfs, path, keys=None):
1864 1864 self.vfs = vfs
1865 1865 self.path = path
1866 1866
1867 1867 def read(self, firstlinenonkeyval=False):
1868 1868 """Read the contents of a simple key-value file
1869 1869
1870 1870 'firstlinenonkeyval' indicates whether the first line of the file
1871 1871 should be treated as a key-value pair or returned fully under the
1872 1872 __firstline key."""
1873 1873 lines = self.vfs.readlines(self.path)
1874 1874 d = {}
1875 1875 if firstlinenonkeyval:
1876 1876 if not lines:
1877 1877 e = _(b"empty simplekeyvalue file")
1878 1878 raise error.CorruptedState(e)
1879 1879 # we don't want to include '\n' in the __firstline
1880 1880 d[self.firstlinekey] = lines[0][:-1]
1881 1881 del lines[0]
1882 1882
1883 1883 try:
1884 1884 # the 'if line.strip()' part prevents us from failing on empty
1885 1885 # lines, which contain only '\n' and therefore are not skipped
1886 1886 # by 'if line'
1887 1887 updatedict = dict(
1888 1888 line[:-1].split(b'=', 1) for line in lines if line.strip()
1889 1889 )
1890 1890 if self.firstlinekey in updatedict:
1891 1891 e = _(b"%r can't be used as a key")
1892 1892 raise error.CorruptedState(e % self.firstlinekey)
1893 1893 d.update(updatedict)
1894 1894 except ValueError as e:
1895 1895 raise error.CorruptedState(stringutil.forcebytestr(e))
1896 1896 return d
1897 1897
1898 1898 def write(self, data, firstline=None):
1899 1899 """Write key=>value mapping to a file
1900 1900 data is a dict. Keys must be alphanumeric and start with a letter.
1901 1901 Values must not contain newline characters.
1902 1902
1903 1903 If 'firstline' is not None, it is written to file before
1904 1904 everything else, as it is, not in a key=value form"""
1905 1905 lines = []
1906 1906 if firstline is not None:
1907 1907 lines.append(b'%s\n' % firstline)
1908 1908
1909 1909 for k, v in data.items():
1910 1910 if k == self.firstlinekey:
1911 1911 e = b"key name '%s' is reserved" % self.firstlinekey
1912 1912 raise error.ProgrammingError(e)
1913 1913 if not k[0:1].isalpha():
1914 1914 e = b"keys must start with a letter in a key-value file"
1915 1915 raise error.ProgrammingError(e)
1916 1916 if not k.isalnum():
1917 1917 e = b"invalid key name in a simple key-value file"
1918 1918 raise error.ProgrammingError(e)
1919 1919 if b'\n' in v:
1920 1920 e = b"invalid value in a simple key-value file"
1921 1921 raise error.ProgrammingError(e)
1922 1922 lines.append(b"%s=%s\n" % (k, v))
1923 1923 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1924 1924 fp.write(b''.join(lines))
1925 1925
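# Illustrative sketch, not part of the original module: round-tripping a
# ``simplekeyvaluefile``. The ``sketchstate`` file name is hypothetical;
# the shelve extension persists its state with this class in a similar way.
def _sketchroundtrip(repo):
    skv = simplekeyvaluefile(repo.vfs, b'sketchstate')
    skv.write({b'version': b'1', b'name': b'default'}, firstline=b'2')
    # returns {b'__firstline': b'2', b'version': b'1', b'name': b'default'}
    return skv.read(firstlinenonkeyval=True)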
1926 1926
1927 1927 _reportobsoletedsource = [
1928 1928 b'debugobsolete',
1929 1929 b'pull',
1930 1930 b'push',
1931 1931 b'serve',
1932 1932 b'unbundle',
1933 1933 ]
1934 1934
1935 1935 _reportnewcssource = [
1936 1936 b'pull',
1937 1937 b'unbundle',
1938 1938 ]
1939 1939
1940 1940
1941 1941 def prefetchfiles(repo, revmatches):
1942 1942 """Invokes the registered file prefetch functions, allowing extensions to
1943 1943 ensure the corresponding files are available locally, before the command
1944 1944 uses them.
1945 1945
1946 1946 Args:
1947 1947 revmatches: a list of (revision, match) tuples to indicate the files to
1948 1948 fetch at each revision. If any of the match elements is None, it matches
1949 1949 all files.
1950 1950 """
1951 1951
1952 1952 def _matcher(m):
1953 1953 if m:
1954 1954 assert isinstance(m, matchmod.basematcher)
1955 1955 # The command itself will complain about files that don't exist, so
1956 1956 # don't duplicate the message.
1957 1957 return matchmod.badmatch(m, lambda fn, msg: None)
1958 1958 else:
1959 1959 return matchall(repo)
1960 1960
1961 1961 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1962 1962
1963 1963 fileprefetchhooks(repo, revbadmatches)
1964 1964
1965 1965
1966 1966 # a list of functions called with (repo, revmatches) to prefetch files
1967 1967 fileprefetchhooks = util.hooks()
1968 1968
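# Illustrative sketch, not part of the original module: how an extension
# (e.g. lfs or remotefilelog) could register a prefetch hook. The function
# body is hypothetical; each hook receives the (rev, match) pairs prepared
# by prefetchfiles() above.
def _sketchprefetch(repo, revmatches):
    for rev, match in revmatches:
        for f in repo[rev].walk(match):
            pass  # fetch the content of f from a remote store here

fileprefetchhooks.add(b'sketch', _sketchprefetch)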
1969 1969 # A marker that tells the evolve extension to suppress its own reporting
1970 1970 _reportstroubledchangesets = True
1971 1971
1972 1972
1973 1973 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1974 1974 """register a callback to issue a summary after the transaction is closed
1975 1975
1976 1976 If as_validator is true, then the callbacks are registered as transaction
1977 1977 validators instead
1978 1978 """
1979 1979
1980 1980 def txmatch(sources):
1981 1981 return any(txnname.startswith(source) for source in sources)
1982 1982
1983 1983 categories = []
1984 1984
1985 1985 def reportsummary(func):
1986 1986 """decorator for report callbacks."""
1987 1987 # The repoview life cycle is shorter than the one of the actual
1988 1988 # underlying repository. So the filtered object can die before the
1989 1989 # weakref is used leading to troubles. We keep a reference to the
1990 1990 # unfiltered object and restore the filtering when retrieving the
1991 1991 # repository through the weakref.
1992 1992 filtername = repo.filtername
1993 1993 reporef = weakref.ref(repo.unfiltered())
1994 1994
1995 1995 def wrapped(tr):
1996 1996 repo = reporef()
1997 1997 if filtername:
1998 1998 assert repo is not None # help pytype
1999 1999 repo = repo.filtered(filtername)
2000 2000 func(repo, tr)
2001 2001
2002 2002 newcat = b'%02i-txnreport' % len(categories)
2003 2003 if as_validator:
2004 2004 otr.addvalidator(newcat, wrapped)
2005 2005 else:
2006 2006 otr.addpostclose(newcat, wrapped)
2007 2007 categories.append(newcat)
2008 2008 return wrapped
2009 2009
2010 2010 @reportsummary
2011 2011 def reportchangegroup(repo, tr):
2012 2012 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2013 2013 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2014 2014 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2015 2015 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2016 2016 if cgchangesets or cgrevisions or cgfiles:
2017 2017 htext = b""
2018 2018 if cgheads:
2019 2019 htext = _(b" (%+d heads)") % cgheads
2020 2020 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2021 2021 if as_validator:
2022 2022 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2023 2023 assert repo is not None # help pytype
2024 2024 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2025 2025
2026 2026 if txmatch(_reportobsoletedsource):
2027 2027
2028 2028 @reportsummary
2029 2029 def reportobsoleted(repo, tr):
2030 2030 obsoleted = obsutil.getobsoleted(repo, tr)
2031 2031 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2032 2032 if newmarkers:
2033 2033 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2034 2034 if obsoleted:
2035 2035 msg = _(b'obsoleted %i changesets\n')
2036 2036 if as_validator:
2037 2037 msg = _(b'obsoleting %i changesets\n')
2038 2038 repo.ui.status(msg % len(obsoleted))
2039 2039
2040 2040 if obsolete.isenabled(
2041 2041 repo, obsolete.createmarkersopt
2042 2042 ) and repo.ui.configbool(
2043 2043 b'experimental', b'evolution.report-instabilities'
2044 2044 ):
2045 2045 instabilitytypes = [
2046 2046 (b'orphan', b'orphan'),
2047 2047 (b'phase-divergent', b'phasedivergent'),
2048 2048 (b'content-divergent', b'contentdivergent'),
2049 2049 ]
2050 2050
2051 2051 def getinstabilitycounts(repo):
2052 2052 filtered = repo.changelog.filteredrevs
2053 2053 counts = {}
2054 2054 for instability, revset in instabilitytypes:
2055 2055 counts[instability] = len(
2056 2056 set(obsolete.getrevs(repo, revset)) - filtered
2057 2057 )
2058 2058 return counts
2059 2059
2060 2060 oldinstabilitycounts = getinstabilitycounts(repo)
2061 2061
2062 2062 @reportsummary
2063 2063 def reportnewinstabilities(repo, tr):
2064 2064 newinstabilitycounts = getinstabilitycounts(repo)
2065 2065 for instability, revset in instabilitytypes:
2066 2066 delta = (
2067 2067 newinstabilitycounts[instability]
2068 2068 - oldinstabilitycounts[instability]
2069 2069 )
2070 2070 msg = getinstabilitymessage(delta, instability)
2071 2071 if msg:
2072 2072 repo.ui.warn(msg)
2073 2073
2074 2074 if txmatch(_reportnewcssource):
2075 2075
2076 2076 @reportsummary
2077 2077 def reportnewcs(repo, tr):
2078 2078 """Report the range of new revisions pulled/unbundled."""
2079 2079 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2080 2080 unfi = repo.unfiltered()
2081 2081 if origrepolen >= len(unfi):
2082 2082 return
2083 2083
2084 2084 # Compute the bounds of new visible revisions' range.
2085 2085 revs = smartset.spanset(repo, start=origrepolen)
2086 2086 if revs:
2087 2087 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2088 2088
2089 2089 if minrev == maxrev:
2090 2090 revrange = minrev
2091 2091 else:
2092 2092 revrange = b'%s:%s' % (minrev, maxrev)
2093 2093 draft = len(repo.revs(b'%ld and draft()', revs))
2094 2094 secret = len(repo.revs(b'%ld and secret()', revs))
2095 2095 if not (draft or secret):
2096 2096 msg = _(b'new changesets %s\n') % revrange
2097 2097 elif draft and secret:
2098 2098 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2099 2099 msg %= (revrange, draft, secret)
2100 2100 elif draft:
2101 2101 msg = _(b'new changesets %s (%d drafts)\n')
2102 2102 msg %= (revrange, draft)
2103 2103 elif secret:
2104 2104 msg = _(b'new changesets %s (%d secrets)\n')
2105 2105 msg %= (revrange, secret)
2106 2106 else:
2107 2107 errormsg = b'entered unreachable condition'
2108 2108 raise error.ProgrammingError(errormsg)
2109 2109 repo.ui.status(msg)
2110 2110
2111 2111 # search new changesets directly pulled as obsolete
2112 2112 duplicates = tr.changes.get(b'revduplicates', ())
2113 2113 obsadded = unfi.revs(
2114 2114 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2115 2115 )
2116 2116 cl = repo.changelog
2117 2117 extinctadded = [r for r in obsadded if r not in cl]
2118 2118 if extinctadded:
2119 2119 # They are not just obsolete, but obsolete and invisible
2120 2120 # we call them "extinct" internally but the term has not been
2121 2121 # exposed to users.
2122 2122 msg = b'(%d other changesets obsolete on arrival)\n'
2123 2123 repo.ui.status(msg % len(extinctadded))
2124 2124
2125 2125 @reportsummary
2126 2126 def reportphasechanges(repo, tr):
2127 2127 """Report statistics of phase changes for changesets pre-existing
2128 2128 pull/unbundle.
2129 2129 """
2130 2130 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2131 2131 published = []
2132 2132 for revs, (old, new) in tr.changes.get(b'phases', []):
2133 2133 if new != phases.public:
2134 2134 continue
2135 2135 published.extend(rev for rev in revs if rev < origrepolen)
2136 2136 if not published:
2137 2137 return
2138 2138 msg = _(b'%d local changesets published\n')
2139 2139 if as_validator:
2140 2140 msg = _(b'%d local changesets will be published\n')
2141 2141 repo.ui.status(msg % len(published))
2142 2142
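# Illustrative sketch, not part of the original module: attaching the
# summary callbacks to a transaction. In Mercurial proper this happens
# inside localrepository.transaction(); the standalone pull below is
# hypothetical.
def _sketchtxn(repo):
    with repo.transaction(b'pull') as tr:
        registersummarycallback(repo, tr, txnname=b'pull')
        # ... apply a changegroup here; the reports run when tr closes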
2143 2143
2144 2144 def getinstabilitymessage(delta, instability):
2145 2145 """function to return the message to show warning about new instabilities
2146 2146
2147 2147 exists as a separate function so that extension can wrap to show more
2148 2148 information like how to fix instabilities"""
2149 2149 if delta > 0:
2150 2150 return _(b'%i new %s changesets\n') % (delta, instability)
2151 2151
2152 2152
2153 2153 def nodesummaries(repo, nodes, maxnumnodes=4):
2154 2154 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2155 2155 return b' '.join(short(h) for h in nodes)
2156 2156 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2157 2157 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2158 2158
2159 2159
2160 2160 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2161 2161 """check that no named branch has multiple heads"""
2162 2162 if desc in (b'strip', b'repair'):
2163 2163 # skip the logic during strip
2164 2164 return
2165 2165 visible = repo.filtered(filtername)
2166 2166 # possible improvement: we could restrict the check to affected branch
2167 2167 bm = visible.branchmap()
2168 2168 for name in bm:
2169 2169 heads = bm.branchheads(name, closed=accountclosed)
2170 2170 if len(heads) > 1:
2171 2171 msg = _(b'rejecting multiple heads on branch "%s"')
2172 2172 msg %= name
2173 2173 hint = _(b'%d heads: %s')
2174 2174 hint %= (len(heads), nodesummaries(repo, heads))
2175 2175 raise error.Abort(msg, hint=hint)
2176 2176
2177 2177
2178 2178 def wrapconvertsink(sink):
2179 2179 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2180 2180 before it is used, whether or not the convert extension was formally loaded.
2181 2181 """
2182 2182 return sink
2183 2183
2184 2184
2185 2185 def unhidehashlikerevs(repo, specs, hiddentype):
2186 2186 """parse the user specs and unhide changesets whose hash or revision number
2187 2187 is passed.
2188 2188
2189 2189 hiddentype can be: 1) 'warn': warn while unhiding changesets
2190 2190 2) 'nowarn': don't warn while unhiding changesets
2191 2191
2192 2192 returns a repo object with the required changesets unhidden
2193 2193 """
2194 2194 if not repo.filtername or not repo.ui.configbool(
2195 2195 b'experimental', b'directaccess'
2196 2196 ):
2197 2197 return repo
2198 2198
2199 2199 if repo.filtername not in (b'visible', b'visible-hidden'):
2200 2200 return repo
2201 2201
2202 2202 symbols = set()
2203 2203 for spec in specs:
2204 2204 try:
2205 2205 tree = revsetlang.parse(spec)
2206 2206 except error.ParseError: # will be reported by scmutil.revrange()
2207 2207 continue
2208 2208
2209 2209 symbols.update(revsetlang.gethashlikesymbols(tree))
2210 2210
2211 2211 if not symbols:
2212 2212 return repo
2213 2213
2214 2214 revs = _getrevsfromsymbols(repo, symbols)
2215 2215
2216 2216 if not revs:
2217 2217 return repo
2218 2218
2219 2219 if hiddentype == b'warn':
2220 2220 unfi = repo.unfiltered()
2221 2221 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2222 2222 repo.ui.warn(
2223 2223 _(
2224 2224 b"warning: accessing hidden changesets for write "
2225 2225 b"operation: %s\n"
2226 2226 )
2227 2227 % revstr
2228 2228 )
2229 2229
2230 2230 # we have to use a new filtername to separate the branch/tags caches
2231 2231 # until we can disable these caches when revisions are dynamically pinned.
2232 2232 return repo.filtered(b'visible-hidden', revs)
2233 2233
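# Illustrative sketch, not part of the original module: the experimental
# configuration that makes unhidehashlikerevs() act, plus a typical call.
# Assumes an hgrc such as:
#
#   [experimental]
#   directaccess = True
#   directaccess.revnums = True
def _sketchunhide(repo, specs):
    # warn while unhiding, as read-write commands usually do
    return unhidehashlikerevs(repo, specs, hiddentype=b'warn')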
2234 2234
2235 2235 def _getrevsfromsymbols(repo, symbols):
2236 2236 """parse the list of symbols and returns a set of revision numbers of hidden
2237 2237 changesets present in symbols"""
2238 2238 revs = set()
2239 2239 unfi = repo.unfiltered()
2240 2240 unficl = unfi.changelog
2241 2241 cl = repo.changelog
2242 2242 tiprev = len(unficl)
2243 2243 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2244 2244 for s in symbols:
2245 2245 try:
2246 2246 n = int(s)
2247 2247 if n <= tiprev:
2248 2248 if not allowrevnums:
2249 2249 continue
2250 2250 else:
2251 2251 if n not in cl:
2252 2252 revs.add(n)
2253 2253 continue
2254 2254 except ValueError:
2255 2255 pass
2256 2256
2257 2257 try:
2258 2258 s = resolvehexnodeidprefix(unfi, s)
2259 2259 except (error.LookupError, error.WdirUnsupported):
2260 2260 s = None
2261 2261
2262 2262 if s is not None:
2263 2263 rev = unficl.rev(s)
2264 2264 if rev not in cl:
2265 2265 revs.add(rev)
2266 2266
2267 2267 return revs
2268 2268
2269 2269
2270 2270 def bookmarkrevs(repo, mark):
2271 2271 """Select revisions reachable by a given bookmark
2272 2272
2273 2273 If the bookmarked revision isn't a head, an empty set will be returned.
2274 2274 """
2275 2275 return repo.revs(format_bookmark_revspec(mark))
2276 2276
2277 2277
2278 2278 def format_bookmark_revspec(mark):
2279 2279 """Build a revset expression to select revisions reachable by a given
2280 2280 bookmark"""
2281 2281 mark = b'literal:' + mark
2282 2282 return revsetlang.formatspec(
2283 2283 b"ancestors(bookmark(%s)) - "
2284 2284 b"ancestors(head() and not bookmark(%s)) - "
2285 2285 b"ancestors(bookmark() and not bookmark(%s))",
2286 2286 mark,
2287 2287 mark,
2288 2288 mark,
2289 2289 )
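
# Illustrative sketch, not part of the original module: for a hypothetical
# bookmark ``feature``, the expression above expands to roughly
#
#   ancestors(bookmark("literal:feature"))
#     - ancestors(head() and not bookmark("literal:feature"))
#     - ancestors(bookmark() and not bookmark("literal:feature"))
#
# i.e. the revisions reachable only from that bookmark, which is what
# commands like ``hg strip -B feature`` rely on.
def _sketchbookmarkowned(repo):
    return bookmarkrevs(repo, b'feature')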