##// END OF EJS Templates
scmutil: obsrevs is already a frozenset...
av6 -
r49575:c7e67584 default
parent child Browse files
Show More
@@ -1,2309 +1,2307 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 short,
24 24 wdirrev,
25 25 )
26 26 from .pycompat import getattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies as copiesmod,
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 requirements as requirementsmod,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 hashutil,
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod('parsers')
60 60 rustrevlog = policy.importrust('revlog')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
@attr.s(slots=True, repr=False)
class status(object):
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    # each attribute holds a list of repo-relative file names (bytes)
    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        # iteration order matches the positional order of the attributes
        # above; callers rely on unpacking in exactly this order
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        # native (sysstr) strings so repr() is printable on both py2/py3
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95 95
96 96
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs, sorted by subpath.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths present only in ctx2 (e.g. a subrepo removed in ctx1's
    # pending .hgsub edit) are handled separately below
    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
121 121
122 122
def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            # count secret changesets that are still alive; extinct ones
            # would not have been exchanged anyway
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))
141 141
142 142
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            # print the traceback if --traceback was requested, then let the
            # outer handlers classify the exception
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        if isinstance(inst, error.RepoLookupError):
            detailed_exit_code = 10
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        # the exception may carry its own preferred exit codes
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, pycompat.unicode):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            # broken pipe (e.g. output piped to `head`) is not an error
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code
267 267
268 268
def checknewlabel(repo, lbl, kind):
    """Validate that ``lbl`` is usable as a new label (bookmark/branch/tag).

    Raises error.InputError for reserved names, forbidden characters,
    integer-looking names, and leading/trailing whitespace.
    """
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in (b'tip', b'.', b'null'):
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for forbidden in (b':', b'\0', b'\n', b'\r'):
        if forbidden in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(forbidden)
            )
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the whole label parsed as an integer: would shadow a revnum
        raise error.InputError(_(b"cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )
288 288
289 289
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newline and carriage return would corrupt the dirstate/manifest format
    for banned in (b'\r', b'\n'):
        if banned in f:
            raise error.InputError(
                _(b"'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f)
            )
297 297
298 298
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    # always reject outright-invalid names, regardless of config
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        # checkwinfilename returns a message only for Windows-unsafe names
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)
310 310
311 311
def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans. Raises ConfigError when
    ui.portablefilenames is not one of abort/warn/ignore/a boolean.
    """
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    # on Windows non-portable names can't be created at all, so always abort
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn
325 325
326 326
class casecollisionauditor(object):
    """Warn or abort when adding a file whose name case-folds to an
    already-tracked (or already-added) file's name."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # whether a collision raises (True) or merely warns (False)
        self._abort = abort
        # join/split through a NUL separator so the whole dirstate is
        # lowercased in one encoding.lower() call instead of per-file
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # f: repo-relative filename (bytes) about to be added
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # a collision only counts if f itself is not already tracked
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.StateError(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
350 350
351 351
def filteredhash(repo, maxrev, needobsolete=False):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (and, optionally,
    all obsolete revs) up to maxrev and returns that SHA-1 digest.
    """
    cl = repo.changelog
    if needobsolete:
        # per r49575 obsolete.getrevs() returns a frozenset, so it is
        # hashable and can go straight into the cache key
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        if not cl.filteredrevs and not obsrevs:
            return None
        key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
    else:
        if not cl.filteredrevs:
            return None
        key = maxrev
        obsrevs = frozenset()

    # digests are memoized per (maxrev, filtered, obsolete) key on the
    # changelog instance
    result = cl._filteredrevs_hashcache.get(key)
    if not result:
        revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            result = s.digest()
        # note: an empty revs list caches None, so the sort is not redone
        cl._filteredrevs_hashcache[key] = result
    return result
388 386
389 387
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        # only errors on the root path itself are fatal; unreadable
        # subdirectories are silently skipped by os.walk
        if err.filename == path:
            raise err

    # samestat may be missing on some platforms; without it we cannot
    # detect symlink cycles, so symlink following is disabled below
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            # record dirname's stat; return False if an identical directory
            # (same inode) was already visited — guards against symlink loops
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the symlink ourselves; os.walk
                        # does not follow links with default settings
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
437 435
438 436
def binnode(ctx):
    """Return binary node id for a given basectx

    The working-directory context has no node (None); map it to the
    repository's wdirid sentinel so callers always get bytes.
    """
    node = ctx.node()
    if node is None:
        return ctx.repo().nodeconstants.wdirid
    return node
445 443
446 444
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # the working directory has rev None; substitute the wdirrev sentinel
    # so the result is always an int
    return wdirrev if rev is None else rev
454 452
455 453
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    # intrev/binnode handle the working-directory context's None rev/node
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
461 459
462 460
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity

    Uses the full 40-char hex node with --debug, the short form otherwise.
    """
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))
470 468
471 469
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly 'x'-prefixed) hex nodeid prefix to a binary node.

    Returns None when nothing matches; re-raises AmbiguousPrefixLookupError
    when the prefix is ambiguous and disambiguation via the configured
    revset fails.
    """
    # a leading 'x' marks an explicit hex prefix (see mayberevnum/disambiguate)
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # only an unambiguous match within the revset resolves it
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
502 500
503 501
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        value = int(prefix)
    except ValueError:
        return False
    # if we are a pure int, then starting with zero will not be
    # confused as a rev; or, obviously, if the int is larger
    # than the value of the tip rev. We still need to disambiguate if
    # prefix == '0', since that *is* a valid revnum.
    if prefix != b'0' and prefix.startswith(b'0'):
        return False
    return value < len(repo)
517 515
518 516
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # with prefixhexnode, a revnum-looking prefix is marked with 'x'
            # instead of being lengthened
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # otherwise grow the prefix until it can no longer be a revnum
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapped need to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # no C nodetree available: fall back to an O(len(revs)) scan
            # per candidate length
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
591 589
592 590
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False
604 602
605 603
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            # reject things like b'010' or b'+5' that int() accepts but
            # that don't round-trip to the same string
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            # precedence note: parses as r < 0 or (r >= l and r != wdirrev)
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # full-length hex nodeid
        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try as a short hex nodeid prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)
671 669
672 670
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # non-"visible" filters (e.g. served) get a generic message
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
697 695
698 696
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve a single revspec to a changectx, using `default` when the
    spec is empty. Raises InputError on an empty resulting set."""
    # `revspec != 0` keeps the integer revision 0 from being treated as
    # "no spec given"
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.InputError(_(b'empty revision set'))
    return repo[l.last()]
707 705
708 706
def _pairspec(revspec):
    """Return True if revspec is a top-level range expression (e.g. 'a::b',
    '::b', 'a::', ':') which must always produce a pair in revpair()."""
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )
717 715
718 716
def revpair(repo, revs):
    """Resolve user-supplied revision specs to a (first, second) context pair.

    With no specs, returns ('.', working directory). The second context is
    the working directory when a single non-range spec resolves to a single
    revision.
    """
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.InputError(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    # first == second with multiple specs means at least one side of the
    # range resolved to nothing
    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
743 741
744 742
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        # bare ints are wrapped so revset parsing sees a literal revnum
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
772 770
773 771
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes starting at ``windowsize``, doubling each time
    until ``sizelimit`` is reached, then repeating the cap forever.

    This generator never terminates; callers stop consuming it.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
779 777
780 778
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            # collect up to windowsize revs in the caller's order
            nrevs = []
            for i in pycompat.xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            # prepare in ascending (forward) order...
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            # ...then yield in the caller's requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
821 819
822 820
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        # in debug mode always show both parents, padding with null
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        # parent is the immediately preceding rev: implied, so omit it
        return []
    return parents
838 836
839 837
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    # repo-relative output: either forward slashes as-is or the platform's
    # native separators
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath
878 876
879 877
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def subrelpath(f):
        # prepend the subrepo path, then defer to the parent's formatter
        return uipathfn(posixpath.join(subpath, f))

    return subrelpath
883 881
884 882
def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    if pats:
        return True
    return bool(opts.get(b'include')) or bool(opts.get(b'exclude'))
892 890
893 891
def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        # only expand patterns with no explicit kind (e.g. not 're:', 'path:')
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                # invalid glob: fall back to the literal pattern
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # no expansion happened: keep the original spec verbatim
        ret.append(kindpat)
    return ret
912 910
913 911
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        # default bad-file callback: warn with a UI-formatted path
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    # an always-matcher means the patterns were effectively empty
    if m.always():
        pats = []
    return m, pats
945 943
946 944
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats() but callers only want the matcher itself
    m, _discarded = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
952 950
953 951
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # 'repo' is unused here; presumably kept so the match* helpers share a
    # uniform signature — confirm before removing
    return matchmod.always()
957 955
958 956
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # 'repo' is unused; the exact matcher needs only the file list
    return matchmod.exact(files, badfn=badfn)
962 960
963 961
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises ParseError (with `msg`) unless the pattern resolves to exactly
    one file in the context of `rev`."""
    if not matchmod.patkind(pat):
        # plain path, no pattern kind: canonicalize against the repo root
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    matching = [f for f in ctx if m(f)]
    if len(matching) != 1:
        # followlines needs a single unambiguous file
        raise error.ParseError(msg)
    return matching[0]
977 975
978 976
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    configured = ui.config(b'ui', b'origbackuppath')
    if not configured:
        # no [ui] origbackuppath: callers fall back to in-place .orig files
        return None
    return vfs.vfs(repo.wvfs.join(configured))
987 985
988 986
def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # no backup directory configured: back up next to the file itself
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        # (a file sitting where we need a directory blocks makedirs below);
        # only the first conflicting ancestor is removed — makedirs handles
        # the rest of the hierarchy
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # symmetrically, a directory sitting at the backup file's own path
    # must go before the caller can write the backup file there
    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
1023 1021
1024 1022
1025 1023 class _containsnode(object):
1026 1024 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1027 1025
1028 1026 def __init__(self, repo, revcontainer):
1029 1027 self._torev = repo.changelog.rev
1030 1028 self._revcontains = revcontainer.__contains__
1031 1029
1032 1030 def __contains__(self, node):
1033 1031 return self._revcontains(self._torev(node))
1034 1032
1035 1033
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    # targetphase only makes sense when phase fixing is requested
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        # bare iterable of nodes: each is replaced by nothing (pruned)
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # explicit entries in 'moves' take precedence over computed ones
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # invert the replacement mapping: new node -> old nodes it replaces
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        # process in rev order so a node's parents are decided before it
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            # a child can never be in an earlier phase than its parents
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # apply the phase movements computed in the fixphase section above
        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportinternal(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            # no obsmarkers and no archiving: physically strip the old nodes
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
1217 1215
1218 1216
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and forget missing files, recursing into subrepos.

    Supports the core of the 'hg addremove' command: files matched by
    'matcher' that are unknown get added, files that are gone get removed,
    and (when a similarity threshold is given) removed/added pairs are
    recorded as renames. Returns 1 if any path was rejected, else 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    # normalize the user-facing percentage to the 0..1 range used internally
    similarity /= 100.0

    ret = 0

    # recurse into subrepositories first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        # only report explicitly-listed bad files; record all for the exit code
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly-requested file that was rejected makes the whole run fail
    for f in rejected:
        if f in m.files():
            return 1
    return ret
1286 1284
1287 1285
def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    # NOTE: the lambda closes over 'rejected', which is assigned on the next
    # line — this works because the closure is only invoked later
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # any rejected file that was explicitly requested means failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1321 1319
1322 1320
def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed, forgotten).
    """
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    # 'st' is the walk's stat result (falsy when the file is gone from disk)
    for abs, st in pycompat.iteritems(walkresults):
        entry = dirstate.get_entry(abs)
        if (not entry.any_tracked) and audit_path.check(abs):
            # on disk but completely unknown to the dirstate
            unknown.append(abs)
        elif (not entry.removed) and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif entry.removed and st:
            # marked removed, yet present on disk again
            forgotten.append(abs)
        # for finding renames
        elif entry.removed and not st:
            removed.append(abs)
        elif entry.added:
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1357 1355
1358 1356
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty unless similarity is positive.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        # stay quiet about implicit matches unless the user asked for detail
        both_exact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not both_exact:
            repo.ui.status(
                _(
                    b'recording removal of %s as rename to %s '
                    b'(%d%% similar)\n'
                )
                % (uipathfn(old), uipathfn(new), score * 100)
            )
        renames[new] = old
    return renames
1380 1378
1381 1379
def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    with repo.wlock():
        wctx = repo[None]
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in pycompat.iteritems(renames):
            wctx.copy(src, dst)
1391 1389
1392 1390
def getrenamedfn(repo, endrev=None):
    """Return a function getrenamed(fn, rev) -> copy source (or None).

    Picks the changeset-centric or filelog-based strategy depending on the
    repo's copy-tracing configuration.
    """
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            # copy metadata lives in the changeset itself; check both parents
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    # filelog-based path: cache rename info per file, keyed by linkrev
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                # store the copy source, or False/None when not renamed
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1437 1435
1438 1436
def getcopiesfn(repo, endrev=None):
    """Return a function copiesfn(ctx) -> sorted/ordered (dst, src) pairs."""
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            p2copies = ctx.p2copies()
            if not p2copies:
                return sorted(ctx.p1copies().items())
            merged = ctx.p1copies().copy()
            # There should be no overlap
            merged.update(p2copies)
            return sorted(merged.items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            found = []
            rev = ctx.rev()
            for fn in ctx.files():
                src = getrenamed(fn, rev)
                if src:
                    found.append((fn, src))
            return found

    return copiesfn
1463 1461
1464 1462
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain: if src is itself a copy, credit the original
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        entry = repo.dirstate.get_entry(dst)
        if (entry.added or not entry.tracked) and not dryrun:
            repo.dirstate.set_tracked(dst)
    else:
        if repo.dirstate.get_entry(origsrc).added and origsrc == src:
            # the source is uncommitted, so no copy metadata can be recorded;
            # warn and just add the destination instead
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if not repo.dirstate.get_entry(dst).tracked and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1488 1486
1489 1487
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    # snapshot the working-dir copies before reparenting clobbers them
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)

    for f in s.modified:
        ds.update_file_p1(f, p1_tracked=True)

    for f in s.added:
        ds.update_file_p1(f, p1_tracked=False)

    for f in s.removed:
        ds.update_file_p1(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # chain copies through the old parent: dst <- src <- (src's own source)
    copies = {
        dst: oldcopies.get(src, src)
        for dst, src in pycompat.iteritems(oldcopies)
    }
    # Adjust the dirstate copies
    for dst, src in pycompat.iteritems(copies):
        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
            # drop the copy record when it no longer makes sense in newctx
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()
1525 1523
1526 1524
def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT not in requirements:
        # share-safe is off: everything goes in .hg/requires, no store file
        return requirements, None
    wc = {
        r
        for r in requirements
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS
    }
    store = {r for r in requirements if r not in wc}
    return wc, store
1544 1542
1545 1543
def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    requires = repo.requirements
    return requirementsmod.TREEMANIFEST_REQUIREMENT in requires
1549 1547
1550 1548
def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
        return
    if repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')
1568 1566
1569 1567
def writerequires(opener, requirements):
    """Atomically write the sorted requirements, one per line."""
    lines = [b"%s\n" % req for req in sorted(requirements)]
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for line in lines:
            fp.write(line)
1574 1572
1575 1573
class filecachesubentry(object):
    """Tracks the stat state of a single path for cache invalidation.

    'cachestat' is the last observed util.cachestat (None if the file was
    missing or never statted); '_cacheable' is a tri-state: True/False once
    known, None while undetermined.
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only when the file's stat data is usable for caching
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file differs from the cached stat (or if the
        file cannot be reliably cached at all)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) when the file does not exist;
        # any other OS error is propagated
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1630 1628
1631 1629
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a set of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1648 1646
1649 1647
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def tracked_paths(self, obj):
        # resolve each relative path through the subclass-provided join()
        return [self.join(obj, path) for path in self.paths]

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped getter and both the
        # str (attribute) and bytes (_filecache key) forms of its name
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # non-data descriptor: once set in obj.__dict__ we are never called
        # again for this instance, so reaching here implies it is absent

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # underlying file(s) changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = self.tracked_paths(obj)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # future attribute lookups hit __dict__ directly (no stat)
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = self.tracked_paths(obj)
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1737 1735
1738 1736
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # split "rev value" records; a bare rev maps to an empty value
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # always reap the child process and close the stream, even on error
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data
1805 1803
1806 1804
class progress(object):
    """Tracks a progress bar position and forwards updates to 'updatebar'.

    Usable as a context manager: exiting the 'with' block completes the bar.
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        # a None position signals the bar to clear itself
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        # mirror the bar state on the debug channel
        suffix = b' ' + self.unit if self.unit else b''
        label = b' ' + item if item else b''

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, label, self.pos, self.total, suffix, pct)
            )
        else:
            self.ui.debug(
                b'%s:%s %d%s\n' % (self.topic, label, self.pos, suffix)
            )
1856 1854
1857 1855
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta

    Either of the two config knobs enables generaldelta for new repos."""
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1864 1862
1865 1863
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised"""
    # experimental config: format.generaldelta
    optimise = ui.configbool(b'format', b'generaldelta')
    return optimise
1870 1868
1871 1869
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    # reserved key under which read() may return the raw first line
    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        # 'keys' is accepted for interface compatibility but unused here
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_(b"empty simplekeyvalue file"))
            # store the first line verbatim, minus its trailing '\n'
            result[self.firstlinekey] = lines.pop(0)[:-1]
        try:
            # 'line.strip()' (rather than bare 'line') also skips lines that
            # consist of a lone '\n'; a malformed line (no '=') makes dict()
            # raise ValueError, handled below
            parsed = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in parsed:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            result.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return result

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = [] if firstline is None else [b'%s\n' % firstline]
        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError(
                    b"key name '%s' is reserved" % self.firstlinekey
                )
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    b"keys must start with a letter in a key-value file"
                )
            if not k.isalnum():
                raise error.ProgrammingError(
                    b"invalid key name in a simple key-value file"
                )
            if b'\n' in v:
                raise error.ProgrammingError(
                    b"invalid value in a simple key-value file"
                )
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))
1942 1940
1943 1941
# transaction-name prefixes for which registersummarycallback() reports
# obsolescence markers / obsoleted changesets (matched via txmatch's
# startswith check)
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# transaction-name prefixes for which registersummarycallback() reports
# newly-added changesets and phase changes
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1956 1954
1957 1955
def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _prepare(m):
        # a missing matcher means "all files"
        if not m:
            return matchall(repo)
        assert isinstance(m, matchmod.basematcher)
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        return matchmod.badmatch(m, lambda fn, msg: None)

    prepared = [(rev, _prepare(m)) for (rev, m) in revmatches]

    fileprefetchhooks(repo, prepared)
1981 1979
1982 1980
# a list of (repo, revs, match) prefetch functions; extensions register
# callbacks here and prefetchfiles() invokes them
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1988 1986
1989 1987
def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead
    """

    def txmatch(sources):
        # does this transaction's name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        # the numeric prefix keeps callbacks firing in registration order
        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        # counters accumulated by changegroup application during the
        # transaction; all default to 0 when no changegroup was applied
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                # validators run before the transaction commits, so use
                # present-tense wording
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        # (display name, revset name) pairs for the instability kinds
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable revisions per kind, excluding filtered ones
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        # snapshot taken now; the callback below compares against it after
        # the transaction has run
        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                # nothing was added to the (unfiltered) repo
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                # only count changesets that existed before this transaction
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))
2159 2157
2160 2158
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        # no new instabilities of this kind: no message
        return None
    return _(b'%i new %s changesets\n') % (delta, instability)
2168 2166
2169 2167
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short-hash summary of 'nodes', abbreviated past 'maxnumnodes'
    unless the ui is verbose."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    shown = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (shown, len(nodes) - maxnumnodes)
2175 2173
2176 2174
def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branch
    branchmap = visible.branchmap()
    for branch in branchmap:
        heads = branchmap.branchheads(branch, closed=accountclosed)
        if len(heads) <= 1:
            continue
        msg = _(b'rejecting multiple heads on branch "%s"') % branch
        hint = _(b'%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
2193 2191
2194 2192
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # identity by default; extensions override/wrap this function
    return sink
2200 2198
2201 2199
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # guard clauses: bail out unchanged whenever direct access does not apply
    if not specs:
        return repo

    if not repo.filtername:
        return repo
    if not repo.ui.configbool(b'experimental', b'directaccess'):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    # collect every hash-like symbol mentioned in the user-supplied specs
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    hiddenrevs = _getrevsfromsymbols(repo, symbols)
    if not hiddenrevs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in hiddenrevs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these cache when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', hiddenrevs)
2253 2251
2254 2252
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    # revision numbers are only honored when directaccess.revnums is enabled
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            # NOTE(review): 'n <= tiprev' accepts n == len(unficl), one past
            # the last valid rev -- confirm this boundary is intentional
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden means: present in the unfiltered changelog but
                    # absent from the filtered one
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            # not an integer; fall through to hash-prefix resolution
            pass
        # an integer larger than tiprev also falls through and is retried
        # as a hexadecimal node-id prefix below

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            # unknown or unsupported prefix: ignore this symbol
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
2288 2286
2289 2287
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    # revset construction is delegated to format_bookmark_revspec()
    return repo.revs(format_bookmark_revspec(mark))
2296 2294
2297 2295
def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    # the bookmark name is prefixed with 'literal:' before being substituted
    # into the expression
    literal_mark = b'literal:' + mark
    # ancestors of the bookmark, minus ancestors of non-bookmarked heads,
    # minus ancestors of other bookmarks
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        literal_mark,
        literal_mark,
        literal_mark,
    )
General Comments 0
You need to be logged in to leave comments. Login now