scmutil: introduce filterrequirements() to split reqs into wc and store ones...
Pulkit Goyal
r46054:9a99ab82 default
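This commit splits requirement handling in two: requirements tied to the working copy stay in .hg/requires, while store-level requirements get their own .hg/store/requires file. Below is a minimal, self-contained sketch of that split for context; it is an illustration, not the patch itself, and WORKING_DIR_REQUIREMENTS here is a stand-in for the set defined in the new requirements module (its members below are made up).

# Illustrative sketch of the new split. WORKING_DIR_REQUIREMENTS mimics
# mercurial/requirements.py's WORKING_DIR_REQUIREMENTS; the members
# listed here are hypothetical.
WORKING_DIR_REQUIREMENTS = {b'shared', b'relshared', b'sparse'}


def filterrequirements(requirements, split_enabled=False):
    """Split requirements into (wcreq, storereq).

    wcreq would be written to .hg/requires and storereq to
    .hg/store/requires. The real patch hard-codes the gate as
    `if False:` for now; split_enabled models that switch.
    """
    if split_enabled:
        wc, store = set(), set()
        for r in requirements:
            if r in WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None


reqs = {b'revlogv1', b'store', b'fncache', b'sparse'}
# Gate off (as in this commit): everything stays in .hg/requires.
assert filterrequirements(reqs) == (reqs, None)
# Gate on (future behavior): requirements are routed by kind.
wc, store = filterrequirements(reqs, split_enabled=True)
assert wc == {b'sparse'}
assert store == {b'revlogv1', b'store', b'fncache'}

writereporequirements() (changed near line 1493 below) then writes each half through the matching vfs: wcreq via repo.vfs into .hg/requires, and storereq, when not None, via repo.svfs into .hg/store/requires.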
@@ -1,2230 +1,2254 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28 from .pycompat import getattr
29 29 from .thirdparty import attr
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 requirements as requirementsmod,
41 42 revsetlang,
42 43 similar,
43 44 smartset,
44 45 url,
45 46 util,
46 47 vfs,
47 48 )
48 49
49 50 from .utils import (
50 51 hashutil,
51 52 procutil,
52 53 stringutil,
53 54 )
54 55
55 56 if pycompat.iswindows:
56 57 from . import scmwindows as scmplatform
57 58 else:
58 59 from . import scmposix as scmplatform
59 60
60 61 parsers = policy.importmod('parsers')
61 62 rustrevlog = policy.importrust('revlog')
62 63
63 64 termsize = scmplatform.termsize
64 65
65 66
66 67 @attr.s(slots=True, repr=False)
67 68 class status(object):
68 69 '''Struct with a list of files per status.
69 70
70 71 The 'deleted', 'unknown' and 'ignored' properties are only
71 72 relevant to the working copy.
72 73 '''
73 74
74 75 modified = attr.ib(default=attr.Factory(list))
75 76 added = attr.ib(default=attr.Factory(list))
76 77 removed = attr.ib(default=attr.Factory(list))
77 78 deleted = attr.ib(default=attr.Factory(list))
78 79 unknown = attr.ib(default=attr.Factory(list))
79 80 ignored = attr.ib(default=attr.Factory(list))
80 81 clean = attr.ib(default=attr.Factory(list))
81 82
82 83 def __iter__(self):
83 84 yield self.modified
84 85 yield self.added
85 86 yield self.removed
86 87 yield self.deleted
87 88 yield self.unknown
88 89 yield self.ignored
89 90 yield self.clean
90 91
91 92 def __repr__(self):
92 93 return (
93 94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
94 95 r'unknown=%s, ignored=%s, clean=%s>'
95 96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
96 97
97 98
98 99 def itersubrepos(ctx1, ctx2):
99 100 """find subrepos in ctx1 or ctx2"""
100 101 # Create a (subpath, ctx) mapping where we prefer subpaths from
101 102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
102 103 # has been modified (in ctx2) but not yet committed (in ctx1).
103 104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
104 105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
105 106
106 107 missing = set()
107 108
108 109 for subpath in ctx2.substate:
109 110 if subpath not in ctx1.substate:
110 111 del subpaths[subpath]
111 112 missing.add(subpath)
112 113
113 114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
114 115 yield subpath, ctx.sub(subpath)
115 116
116 117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
117 118 # status and diff will have an accurate result when it does
118 119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
119 120 # against itself.
120 121 for subpath in missing:
121 122 yield subpath, ctx2.nullsub(subpath, ctx1)
122 123
123 124
124 125 def nochangesfound(ui, repo, excluded=None):
125 126 '''Report no changes for push/pull; excluded is None or a list of
126 127 nodes excluded from the push/pull.
127 128 '''
128 129 secretlist = []
129 130 if excluded:
130 131 for n in excluded:
131 132 ctx = repo[n]
132 133 if ctx.phase() >= phases.secret and not ctx.extinct():
133 134 secretlist.append(n)
134 135
135 136 if secretlist:
136 137 ui.status(
137 138 _(b"no changes found (ignored %d secret changesets)\n")
138 139 % len(secretlist)
139 140 )
140 141 else:
141 142 ui.status(_(b"no changes found\n"))
142 143
143 144
144 145 def callcatch(ui, func):
145 146 """call func() with global exception handling
146 147
147 148 return func() if no exception happens. otherwise do some error handling
148 149 and return an exit code accordingly. does not handle all exceptions.
149 150 """
150 151 try:
151 152 try:
152 153 return func()
153 154 except: # re-raises
154 155 ui.traceback()
155 156 raise
156 157 # Global exception handling, alphabetically
157 158 # Mercurial-specific first, followed by built-in and library exceptions
158 159 except error.LockHeld as inst:
159 160 if inst.errno == errno.ETIMEDOUT:
160 161 reason = _(b'timed out waiting for lock held by %r') % (
161 162 pycompat.bytestr(inst.locker)
162 163 )
163 164 else:
164 165 reason = _(b'lock held by %r') % inst.locker
165 166 ui.error(
166 167 _(b"abort: %s: %s\n")
167 168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
168 169 )
169 170 if not inst.locker:
170 171 ui.error(_(b"(lock might be very busy)\n"))
171 172 except error.LockUnavailable as inst:
172 173 ui.error(
173 174 _(b"abort: could not lock %s: %s\n")
174 175 % (
175 176 inst.desc or stringutil.forcebytestr(inst.filename),
176 177 encoding.strtolocal(inst.strerror),
177 178 )
178 179 )
179 180 except error.OutOfBandError as inst:
180 181 if inst.args:
181 182 msg = _(b"abort: remote error:\n")
182 183 else:
183 184 msg = _(b"abort: remote error\n")
184 185 ui.error(msg)
185 186 if inst.args:
186 187 ui.error(b''.join(inst.args))
187 188 if inst.hint:
188 189 ui.error(b'(%s)\n' % inst.hint)
189 190 except error.RepoError as inst:
190 191 ui.error(_(b"abort: %s!\n") % inst)
191 192 if inst.hint:
192 193 ui.error(_(b"(%s)\n") % inst.hint)
193 194 except error.ResponseError as inst:
194 195 ui.error(_(b"abort: %s") % inst.args[0])
195 196 msg = inst.args[1]
196 197 if isinstance(msg, type(u'')):
197 198 msg = pycompat.sysbytes(msg)
198 199 if not isinstance(msg, bytes):
199 200 ui.error(b" %r\n" % (msg,))
200 201 elif not msg:
201 202 ui.error(_(b" empty string\n"))
202 203 else:
203 204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
204 205 except error.CensoredNodeError as inst:
205 206 ui.error(_(b"abort: file censored %s!\n") % inst)
206 207 except error.StorageError as inst:
207 208 ui.error(_(b"abort: %s!\n") % inst)
208 209 if inst.hint:
209 210 ui.error(_(b"(%s)\n") % inst.hint)
210 211 except error.InterventionRequired as inst:
211 212 ui.error(b"%s\n" % inst)
212 213 if inst.hint:
213 214 ui.error(_(b"(%s)\n") % inst.hint)
214 215 return 1
215 216 except error.WdirUnsupported:
216 217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
217 218 except error.Abort as inst:
218 219 ui.error(_(b"abort: %s\n") % inst)
219 220 if inst.hint:
220 221 ui.error(_(b"(%s)\n") % inst.hint)
221 222 except ImportError as inst:
222 223 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
223 224 m = stringutil.forcebytestr(inst).split()[-1]
224 225 if m in b"mpatch bdiff".split():
225 226 ui.error(_(b"(did you forget to compile extensions?)\n"))
226 227 elif m in b"zlib".split():
227 228 ui.error(_(b"(is your Python install correct?)\n"))
228 229 except (IOError, OSError) as inst:
229 230 if util.safehasattr(inst, b"code"): # HTTPError
230 231 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
231 232 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
232 233 try: # usually it is in the form (errno, strerror)
233 234 reason = inst.reason.args[1]
234 235 except (AttributeError, IndexError):
235 236 # it might be anything, for example a string
236 237 reason = inst.reason
237 238 if isinstance(reason, pycompat.unicode):
238 239 # SSLError of Python 2.7.9 contains a unicode
239 240 reason = encoding.unitolocal(reason)
240 241 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
241 242 elif (
242 243 util.safehasattr(inst, b"args")
243 244 and inst.args
244 245 and inst.args[0] == errno.EPIPE
245 246 ):
246 247 pass
247 248 elif getattr(inst, "strerror", None): # common IOError or OSError
248 249 if getattr(inst, "filename", None) is not None:
249 250 ui.error(
250 251 _(b"abort: %s: '%s'\n")
251 252 % (
252 253 encoding.strtolocal(inst.strerror),
253 254 stringutil.forcebytestr(inst.filename),
254 255 )
255 256 )
256 257 else:
257 258 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
258 259 else: # suspicious IOError
259 260 raise
260 261 except MemoryError:
261 262 ui.error(_(b"abort: out of memory\n"))
262 263 except SystemExit as inst:
263 264 # Commands shouldn't sys.exit directly, but give a return code.
264 265 # Just in case, catch this and pass the exit code to the caller.
265 266 return inst.code
266 267
267 268 return -1
268 269
269 270
270 271 def checknewlabel(repo, lbl, kind):
271 272 # Do not use the "kind" parameter in ui output.
272 273 # It makes strings difficult to translate.
273 274 if lbl in [b'tip', b'.', b'null']:
274 275 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
275 276 for c in (b':', b'\0', b'\n', b'\r'):
276 277 if c in lbl:
277 278 raise error.Abort(
278 279 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
279 280 )
280 281 try:
281 282 int(lbl)
282 283 raise error.Abort(_(b"cannot use an integer as a name"))
283 284 except ValueError:
284 285 pass
285 286 if lbl.strip() != lbl:
286 287 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
287 288
288 289
289 290 def checkfilename(f):
290 291 '''Check that the filename f is an acceptable filename for a tracked file'''
291 292 if b'\r' in f or b'\n' in f:
292 293 raise error.Abort(
293 294 _(b"'\\n' and '\\r' disallowed in filenames: %r")
294 295 % pycompat.bytestr(f)
295 296 )
296 297
297 298
298 299 def checkportable(ui, f):
299 300 '''Check if filename f is portable and warn or abort depending on config'''
300 301 checkfilename(f)
301 302 abort, warn = checkportabilityalert(ui)
302 303 if abort or warn:
303 304 msg = util.checkwinfilename(f)
304 305 if msg:
305 306 msg = b"%s: %s" % (msg, procutil.shellquote(f))
306 307 if abort:
307 308 raise error.Abort(msg)
308 309 ui.warn(_(b"warning: %s\n") % msg)
309 310
310 311
311 312 def checkportabilityalert(ui):
312 313 '''check if the user's config requests nothing, a warning, or abort for
313 314 non-portable filenames'''
314 315 val = ui.config(b'ui', b'portablefilenames')
315 316 lval = val.lower()
316 317 bval = stringutil.parsebool(val)
317 318 abort = pycompat.iswindows or lval == b'abort'
318 319 warn = bval or lval == b'warn'
319 320 if bval is None and not (warn or abort or lval == b'ignore'):
320 321 raise error.ConfigError(
321 322 _(b"ui.portablefilenames value is invalid ('%s')") % val
322 323 )
323 324 return abort, warn
324 325
325 326
326 327 class casecollisionauditor(object):
327 328 def __init__(self, ui, abort, dirstate):
328 329 self._ui = ui
329 330 self._abort = abort
330 331 allfiles = b'\0'.join(dirstate)
331 332 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
332 333 self._dirstate = dirstate
333 334 # The purpose of _newfiles is so that we don't complain about
334 335 # case collisions if someone were to call this object with the
335 336 # same filename twice.
336 337 self._newfiles = set()
337 338
338 339 def __call__(self, f):
339 340 if f in self._newfiles:
340 341 return
341 342 fl = encoding.lower(f)
342 343 if fl in self._loweredfiles and f not in self._dirstate:
343 344 msg = _(b'possible case-folding collision for %s') % f
344 345 if self._abort:
345 346 raise error.Abort(msg)
346 347 self._ui.warn(_(b"warning: %s\n") % msg)
347 348 self._loweredfiles.add(fl)
348 349 self._newfiles.add(f)
349 350
350 351
351 352 def filteredhash(repo, maxrev):
352 353 """build hash of filtered revisions in the current repoview.
353 354
354 355 Multiple caches perform up-to-date validation by checking that the
355 356 tiprev and tipnode stored in the cache file match the current repository.
356 357 However, this is not sufficient for validating repoviews because the set
357 358 of revisions in the view may change without the repository tiprev and
358 359 tipnode changing.
359 360
360 361 This function hashes all the revs filtered from the view and returns
361 362 that SHA-1 digest.
362 363 """
363 364 cl = repo.changelog
364 365 if not cl.filteredrevs:
365 366 return None
366 367 key = None
367 368 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
368 369 if revs:
369 370 s = hashutil.sha1()
370 371 for rev in revs:
371 372 s.update(b'%d;' % rev)
372 373 key = s.digest()
373 374 return key
374 375
375 376
376 377 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
377 378 '''yield every hg repository under path, always recursively.
378 379 The recurse flag will only control recursion into repo working dirs'''
379 380
380 381 def errhandler(err):
381 382 if err.filename == path:
382 383 raise err
383 384
384 385 samestat = getattr(os.path, 'samestat', None)
385 386 if followsym and samestat is not None:
386 387
387 388 def adddir(dirlst, dirname):
388 389 dirstat = os.stat(dirname)
389 390 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
390 391 if not match:
391 392 dirlst.append(dirstat)
392 393 return not match
393 394
394 395 else:
395 396 followsym = False
396 397
397 398 if (seen_dirs is None) and followsym:
398 399 seen_dirs = []
399 400 adddir(seen_dirs, path)
400 401 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
401 402 dirs.sort()
402 403 if b'.hg' in dirs:
403 404 yield root # found a repository
404 405 qroot = os.path.join(root, b'.hg', b'patches')
405 406 if os.path.isdir(os.path.join(qroot, b'.hg')):
406 407 yield qroot # we have a patch queue repo here
407 408 if recurse:
408 409 # avoid recursing inside the .hg directory
409 410 dirs.remove(b'.hg')
410 411 else:
411 412 dirs[:] = [] # don't descend further
412 413 elif followsym:
413 414 newdirs = []
414 415 for d in dirs:
415 416 fname = os.path.join(root, d)
416 417 if adddir(seen_dirs, fname):
417 418 if os.path.islink(fname):
418 419 for hgname in walkrepos(fname, True, seen_dirs):
419 420 yield hgname
420 421 else:
421 422 newdirs.append(d)
422 423 dirs[:] = newdirs
423 424
424 425
425 426 def binnode(ctx):
426 427 """Return binary node id for a given basectx"""
427 428 node = ctx.node()
428 429 if node is None:
429 430 return wdirid
430 431 return node
431 432
432 433
433 434 def intrev(ctx):
434 435 """Return integer for a given basectx that can be used in comparison or
435 436 arithmetic operation"""
436 437 rev = ctx.rev()
437 438 if rev is None:
438 439 return wdirrev
439 440 return rev
440 441
441 442
442 443 def formatchangeid(ctx):
443 444 """Format changectx as '{rev}:{node|formatnode}', which is the default
444 445 template provided by logcmdutil.changesettemplater"""
445 446 repo = ctx.repo()
446 447 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
447 448
448 449
449 450 def formatrevnode(ui, rev, node):
450 451 """Format given revision and node depending on the current verbosity"""
451 452 if ui.debugflag:
452 453 hexfunc = hex
453 454 else:
454 455 hexfunc = short
455 456 return b'%d:%s' % (rev, hexfunc(node))
456 457
457 458
458 459 def resolvehexnodeidprefix(repo, prefix):
459 460 if prefix.startswith(b'x'):
460 461 prefix = prefix[1:]
461 462 try:
462 463 # Uses unfiltered repo because it's faster when prefix is ambiguous.
463 464 # This matches the shortesthexnodeidprefix() function below.
464 465 node = repo.unfiltered().changelog._partialmatch(prefix)
465 466 except error.AmbiguousPrefixLookupError:
466 467 revset = repo.ui.config(
467 468 b'experimental', b'revisions.disambiguatewithin'
468 469 )
469 470 if revset:
470 471 # Clear config to avoid infinite recursion
471 472 configoverrides = {
472 473 (b'experimental', b'revisions.disambiguatewithin'): None
473 474 }
474 475 with repo.ui.configoverride(configoverrides):
475 476 revs = repo.anyrevs([revset], user=True)
476 477 matches = []
477 478 for rev in revs:
478 479 node = repo.changelog.node(rev)
479 480 if hex(node).startswith(prefix):
480 481 matches.append(node)
481 482 if len(matches) == 1:
482 483 return matches[0]
483 484 raise
484 485 if node is None:
485 486 return
486 487 repo.changelog.rev(node) # make sure node isn't filtered
487 488 return node
488 489
489 490
490 491 def mayberevnum(repo, prefix):
491 492 """Checks if the given prefix may be mistaken for a revision number"""
492 493 try:
493 494 i = int(prefix)
494 495 # if we are a pure int, then starting with zero will not be
495 496 # confused as a rev; or, obviously, if the int is larger
496 497 # than the value of the tip rev. We still need to disambiguate if
497 498 # prefix == '0', since that *is* a valid revnum.
498 499 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
499 500 return False
500 501 return True
501 502 except ValueError:
502 503 return False
503 504
504 505
505 506 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
506 507 """Find the shortest unambiguous prefix that matches hexnode.
507 508
508 509 If "cache" is not None, it must be a dictionary that can be used for
509 510 caching between calls to this method.
510 511 """
511 512 # _partialmatch() of filtered changelog could take O(len(repo)) time,
513 514 # which would be unacceptably slow. So we look for hash collisions in
513 514 # unfiltered space, which means some hashes may be slightly longer.
514 515
515 516 minlength = max(minlength, 1)
516 517
517 518 def disambiguate(prefix):
518 519 """Disambiguate against revnums."""
519 520 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
520 521 if mayberevnum(repo, prefix):
521 522 return b'x' + prefix
522 523 else:
523 524 return prefix
524 525
525 526 hexnode = hex(node)
526 527 for length in range(len(prefix), len(hexnode) + 1):
527 528 prefix = hexnode[:length]
528 529 if not mayberevnum(repo, prefix):
529 530 return prefix
530 531
531 532 cl = repo.unfiltered().changelog
532 533 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
533 534 if revset:
534 535 revs = None
535 536 if cache is not None:
536 537 revs = cache.get(b'disambiguationrevset')
537 538 if revs is None:
538 539 revs = repo.anyrevs([revset], user=True)
539 540 if cache is not None:
540 541 cache[b'disambiguationrevset'] = revs
541 542 if cl.rev(node) in revs:
542 543 hexnode = hex(node)
543 544 nodetree = None
544 545 if cache is not None:
545 546 nodetree = cache.get(b'disambiguationnodetree')
546 547 if not nodetree:
547 548 if util.safehasattr(parsers, 'nodetree'):
548 549 # The CExt is the only implementation to provide a nodetree
549 550 # class so far.
550 551 index = cl.index
551 552 if util.safehasattr(index, 'get_cindex'):
552 553 # the rust wrapper needs to give access to its internal index
553 554 index = index.get_cindex()
554 555 nodetree = parsers.nodetree(index, len(revs))
555 556 for r in revs:
556 557 nodetree.insert(r)
557 558 if cache is not None:
558 559 cache[b'disambiguationnodetree'] = nodetree
559 560 if nodetree is not None:
560 561 length = max(nodetree.shortest(node), minlength)
561 562 prefix = hexnode[:length]
562 563 return disambiguate(prefix)
563 564 for length in range(minlength, len(hexnode) + 1):
564 565 matches = []
565 566 prefix = hexnode[:length]
566 567 for rev in revs:
567 568 otherhexnode = repo[rev].hex()
568 569 if prefix == otherhexnode[:length]:
569 570 matches.append(otherhexnode)
570 571 if len(matches) == 1:
571 572 return disambiguate(prefix)
572 573
573 574 try:
574 575 return disambiguate(cl.shortest(node, minlength))
575 576 except error.LookupError:
576 577 raise error.RepoLookupError()
577 578
578 579
579 580 def isrevsymbol(repo, symbol):
580 581 """Checks if a symbol exists in the repo.
581 582
582 583 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
583 584 symbol is an ambiguous nodeid prefix.
584 585 """
585 586 try:
586 587 revsymbol(repo, symbol)
587 588 return True
588 589 except error.RepoLookupError:
589 590 return False
590 591
591 592
592 593 def revsymbol(repo, symbol):
593 594 """Returns a context given a single revision symbol (as string).
594 595
595 596 This is similar to revsingle(), but accepts only a single revision symbol,
596 597 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
597 598 not "max(public())".
598 599 """
599 600 if not isinstance(symbol, bytes):
600 601 msg = (
601 602 b"symbol (%s of type %s) was not a string, did you mean "
602 603 b"repo[symbol]?" % (symbol, type(symbol))
603 604 )
604 605 raise error.ProgrammingError(msg)
605 606 try:
606 607 if symbol in (b'.', b'tip', b'null'):
607 608 return repo[symbol]
608 609
609 610 try:
610 611 r = int(symbol)
611 612 if b'%d' % r != symbol:
612 613 raise ValueError
613 614 l = len(repo.changelog)
614 615 if r < 0:
615 616 r += l
616 617 if r < 0 or r >= l and r != wdirrev:
617 618 raise ValueError
618 619 return repo[r]
619 620 except error.FilteredIndexError:
620 621 raise
621 622 except (ValueError, OverflowError, IndexError):
622 623 pass
623 624
624 625 if len(symbol) == 40:
625 626 try:
626 627 node = bin(symbol)
627 628 rev = repo.changelog.rev(node)
628 629 return repo[rev]
629 630 except error.FilteredLookupError:
630 631 raise
631 632 except (TypeError, LookupError):
632 633 pass
633 634
634 635 # look up bookmarks through the name interface
635 636 try:
636 637 node = repo.names.singlenode(repo, symbol)
637 638 rev = repo.changelog.rev(node)
638 639 return repo[rev]
639 640 except KeyError:
640 641 pass
641 642
642 643 node = resolvehexnodeidprefix(repo, symbol)
643 644 if node is not None:
644 645 rev = repo.changelog.rev(node)
645 646 return repo[rev]
646 647
647 648 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
648 649
649 650 except error.WdirUnsupported:
650 651 return repo[None]
651 652 except (
652 653 error.FilteredIndexError,
653 654 error.FilteredLookupError,
654 655 error.FilteredRepoLookupError,
655 656 ):
656 657 raise _filterederror(repo, symbol)
657 658
658 659
659 660 def _filterederror(repo, changeid):
660 661 """build an exception to be raised about a filtered changeid
661 662
662 663 This is extracted into a function to help extensions (e.g. evolve) to
663 664 experiment with various message variants."""
664 665 if repo.filtername.startswith(b'visible'):
665 666
666 667 # Check if the changeset is obsolete
667 668 unfilteredrepo = repo.unfiltered()
668 669 ctx = revsymbol(unfilteredrepo, changeid)
669 670
670 671 # If the changeset is obsolete, enrich the message with the reason
671 672 # that made this changeset not visible
672 673 if ctx.obsolete():
673 674 msg = obsutil._getfilteredreason(repo, changeid, ctx)
674 675 else:
675 676 msg = _(b"hidden revision '%s'") % changeid
676 677
677 678 hint = _(b'use --hidden to access hidden revisions')
678 679
679 680 return error.FilteredRepoLookupError(msg, hint=hint)
680 681 msg = _(b"filtered revision '%s' (not in '%s' subset)")
681 682 msg %= (changeid, repo.filtername)
682 683 return error.FilteredRepoLookupError(msg)
683 684
684 685
685 686 def revsingle(repo, revspec, default=b'.', localalias=None):
686 687 if not revspec and revspec != 0:
687 688 return repo[default]
688 689
689 690 l = revrange(repo, [revspec], localalias=localalias)
690 691 if not l:
691 692 raise error.Abort(_(b'empty revision set'))
692 693 return repo[l.last()]
693 694
694 695
695 696 def _pairspec(revspec):
696 697 tree = revsetlang.parse(revspec)
697 698 return tree and tree[0] in (
698 699 b'range',
699 700 b'rangepre',
700 701 b'rangepost',
701 702 b'rangeall',
702 703 )
703 704
704 705
705 706 def revpair(repo, revs):
706 707 if not revs:
707 708 return repo[b'.'], repo[None]
708 709
709 710 l = revrange(repo, revs)
710 711
711 712 if not l:
712 713 raise error.Abort(_(b'empty revision range'))
713 714
714 715 first = l.first()
715 716 second = l.last()
716 717
717 718 if (
718 719 first == second
719 720 and len(revs) >= 2
720 721 and not all(revrange(repo, [r]) for r in revs)
721 722 ):
722 723 raise error.Abort(_(b'empty revision on one side of range'))
723 724
724 725 # if the top-level is a range expression, the result must always be a pair
725 726 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
726 727 return repo[first], repo[None]
727 728
728 729 return repo[first], repo[second]
729 730
730 731
731 732 def revrange(repo, specs, localalias=None):
732 733 """Execute 1 to many revsets and return the union.
733 734
734 735 This is the preferred mechanism for executing revsets using user-specified
735 736 config options, such as revset aliases.
736 737
737 738 The revsets specified by ``specs`` will be executed via a chained ``OR``
738 739 expression. If ``specs`` is empty, an empty result is returned.
739 740
740 741 ``specs`` can contain integers, in which case they are assumed to be
741 742 revision numbers.
742 743
743 744 It is assumed the revsets are already formatted. If you have arguments
744 745 that need to be expanded in the revset, call ``revsetlang.formatspec()``
745 746 and pass the result as an element of ``specs``.
746 747
747 748 Specifying a single revset is allowed.
748 749
749 750 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
750 751 integer revisions.
751 752 """
752 753 allspecs = []
753 754 for spec in specs:
754 755 if isinstance(spec, int):
755 756 spec = revsetlang.formatspec(b'%d', spec)
756 757 allspecs.append(spec)
757 758 return repo.anyrevs(allspecs, user=True, localalias=localalias)
758 759
759 760
760 761 def meaningfulparents(repo, ctx):
761 762 """Return list of meaningful (or all if debug) parentrevs for rev.
762 763
763 764 For merges (two non-nullrev revisions) both parents are meaningful.
764 765 Otherwise the first parent revision is considered meaningful if it
765 766 is not the preceding revision.
766 767 """
767 768 parents = ctx.parents()
768 769 if len(parents) > 1:
769 770 return parents
770 771 if repo.ui.debugflag:
771 772 return [parents[0], repo[nullrev]]
772 773 if parents[0].rev() >= intrev(ctx) - 1:
773 774 return []
774 775 return parents
775 776
776 777
777 778 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
778 779 """Return a function that produced paths for presenting to the user.
779 780
780 781 The returned function takes a repo-relative path and produces a path
781 782 that can be presented in the UI.
782 783
783 784 Depending on the value of ui.relative-paths, either a repo-relative or
784 785 cwd-relative path will be produced.
785 786
786 787 legacyrelativevalue is the value to use if ui.relative-paths=legacy
787 788
788 789 If forcerelativevalue is not None, then that value will be used regardless
789 790 of what ui.relative-paths is set to.
790 791 """
791 792 if forcerelativevalue is not None:
792 793 relative = forcerelativevalue
793 794 else:
794 795 config = repo.ui.config(b'ui', b'relative-paths')
795 796 if config == b'legacy':
796 797 relative = legacyrelativevalue
797 798 else:
798 799 relative = stringutil.parsebool(config)
799 800 if relative is None:
800 801 raise error.ConfigError(
801 802 _(b"ui.relative-paths is not a boolean ('%s')") % config
802 803 )
803 804
804 805 if relative:
805 806 cwd = repo.getcwd()
806 807 if cwd != b'':
807 808 # this branch would work even if cwd == b'' (ie cwd = repo
808 809 # root), but its generality makes the returned function slower
809 810 pathto = repo.pathto
810 811 return lambda f: pathto(f, cwd)
811 812 if repo.ui.configbool(b'ui', b'slash'):
812 813 return lambda f: f
813 814 else:
814 815 return util.localpath
815 816
816 817
817 818 def subdiruipathfn(subpath, uipathfn):
818 819 '''Create a new uipathfn that treats the file as relative to subpath.'''
819 820 return lambda f: uipathfn(posixpath.join(subpath, f))
820 821
821 822
822 823 def anypats(pats, opts):
823 824 '''Checks if any patterns, including --include and --exclude were given.
824 825
825 826 Some commands (e.g. addremove) use this condition for deciding whether to
826 827 print absolute or relative paths.
827 828 '''
828 829 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
829 830
830 831
831 832 def expandpats(pats):
832 833 '''Expand bare globs when running on windows.
833 834 On posix we assume it has already been done by sh.'''
834 835 if not util.expandglobs:
835 836 return list(pats)
836 837 ret = []
837 838 for kindpat in pats:
838 839 kind, pat = matchmod._patsplit(kindpat, None)
839 840 if kind is None:
840 841 try:
841 842 globbed = glob.glob(pat)
842 843 except re.error:
843 844 globbed = [pat]
844 845 if globbed:
845 846 ret.extend(globbed)
846 847 continue
847 848 ret.append(kindpat)
848 849 return ret
849 850
850 851
851 852 def matchandpats(
852 853 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
853 854 ):
854 855 '''Return a matcher and the patterns that were used.
855 856 The matcher will warn about bad matches, unless an alternate badfn callback
856 857 is provided.'''
857 858 if opts is None:
858 859 opts = {}
859 860 if not globbed and default == b'relpath':
860 861 pats = expandpats(pats or [])
861 862
862 863 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
863 864
864 865 def bad(f, msg):
865 866 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
866 867
867 868 if badfn is None:
868 869 badfn = bad
869 870
870 871 m = ctx.match(
871 872 pats,
872 873 opts.get(b'include'),
873 874 opts.get(b'exclude'),
874 875 default,
875 876 listsubrepos=opts.get(b'subrepos'),
876 877 badfn=badfn,
877 878 )
878 879
879 880 if m.always():
880 881 pats = []
881 882 return m, pats
882 883
883 884
884 885 def match(
885 886 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
886 887 ):
887 888 '''Return a matcher that will warn about bad matches.'''
888 889 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
889 890
890 891
891 892 def matchall(repo):
892 893 '''Return a matcher that will efficiently match everything.'''
893 894 return matchmod.always()
894 895
895 896
896 897 def matchfiles(repo, files, badfn=None):
897 898 '''Return a matcher that will efficiently match exactly these files.'''
898 899 return matchmod.exact(files, badfn=badfn)
899 900
900 901
901 902 def parsefollowlinespattern(repo, rev, pat, msg):
902 903 """Return a file name from `pat` pattern suitable for usage in followlines
903 904 logic.
904 905 """
905 906 if not matchmod.patkind(pat):
906 907 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
907 908 else:
908 909 ctx = repo[rev]
909 910 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
910 911 files = [f for f in ctx if m(f)]
911 912 if len(files) != 1:
912 913 raise error.ParseError(msg)
913 914 return files[0]
914 915
915 916
916 917 def getorigvfs(ui, repo):
917 918 """return a vfs suitable to save 'orig' file
918 919
919 920 return None if no special directory is configured"""
920 921 origbackuppath = ui.config(b'ui', b'origbackuppath')
921 922 if not origbackuppath:
922 923 return None
923 924 return vfs.vfs(repo.wvfs.join(origbackuppath))
924 925
925 926
926 927 def backuppath(ui, repo, filepath):
927 928 '''customize where working copy backup files (.orig files) are created
928 929
929 930 Fetch user defined path from config file: [ui] origbackuppath = <path>
930 931 Fall back to default (filepath with .orig suffix) if not specified
931 932
932 933 filepath is repo-relative
933 934
934 935 Returns an absolute path
935 936 '''
936 937 origvfs = getorigvfs(ui, repo)
937 938 if origvfs is None:
938 939 return repo.wjoin(filepath + b".orig")
939 940
940 941 origbackupdir = origvfs.dirname(filepath)
941 942 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
942 943 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
943 944
944 945 # Remove any files that conflict with the backup file's path
945 946 for f in reversed(list(pathutil.finddirs(filepath))):
946 947 if origvfs.isfileorlink(f):
947 948 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
948 949 origvfs.unlink(f)
949 950 break
950 951
951 952 origvfs.makedirs(origbackupdir)
952 953
953 954 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
954 955 ui.note(
955 956 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
956 957 )
957 958 origvfs.rmtree(filepath, forcibly=True)
958 959
959 960 return origvfs.join(filepath)
960 961
961 962
962 963 class _containsnode(object):
963 964 """proxy __contains__(node) to container.__contains__ which accepts revs"""
964 965
965 966 def __init__(self, repo, revcontainer):
966 967 self._torev = repo.changelog.rev
967 968 self._revcontains = revcontainer.__contains__
968 969
969 970 def __contains__(self, node):
970 971 return self._revcontains(self._torev(node))
971 972
972 973
973 974 def cleanupnodes(
974 975 repo,
975 976 replacements,
976 977 operation,
977 978 moves=None,
978 979 metadata=None,
979 980 fixphase=False,
980 981 targetphase=None,
981 982 backup=True,
982 983 ):
983 984 """do common cleanups when old nodes are replaced by new nodes
984 985
985 986 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
986 987 (we might also want to move working directory parent in the future)
987 988
988 989 By default, bookmark moves are calculated automatically from 'replacements',
989 990 but 'moves' can be used to override that. Also, 'moves' may include
990 991 additional bookmark moves that should not have associated obsmarkers.
991 992
992 993 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
993 994 have replacements. operation is a string, like "rebase".
994 995
995 996 metadata is a dictionary containing metadata to be stored in obsmarkers if
996 997 obsolescence is enabled.
997 998 """
998 999 assert fixphase or targetphase is None
999 1000 if not replacements and not moves:
1000 1001 return
1001 1002
1002 1003 # translate mapping's other forms
1003 1004 if not util.safehasattr(replacements, b'items'):
1004 1005 replacements = {(n,): () for n in replacements}
1005 1006 else:
1006 1007 # upgrading non-tuple "source" keys to tuples for BC
1007 1008 repls = {}
1008 1009 for key, value in replacements.items():
1009 1010 if not isinstance(key, tuple):
1010 1011 key = (key,)
1011 1012 repls[key] = value
1012 1013 replacements = repls
1013 1014
1014 1015 # Unfiltered repo is needed since nodes in replacements might be hidden.
1015 1016 unfi = repo.unfiltered()
1016 1017
1017 1018 # Calculate bookmark movements
1018 1019 if moves is None:
1019 1020 moves = {}
1020 1021 for oldnodes, newnodes in replacements.items():
1021 1022 for oldnode in oldnodes:
1022 1023 if oldnode in moves:
1023 1024 continue
1024 1025 if len(newnodes) > 1:
1025 1026 # usually a split, take the one with biggest rev number
1026 1027 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1027 1028 elif len(newnodes) == 0:
1028 1029 # move bookmark backwards
1029 1030 allreplaced = []
1030 1031 for rep in replacements:
1031 1032 allreplaced.extend(rep)
1032 1033 roots = list(
1033 1034 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1034 1035 )
1035 1036 if roots:
1036 1037 newnode = roots[0].node()
1037 1038 else:
1038 1039 newnode = nullid
1039 1040 else:
1040 1041 newnode = newnodes[0]
1041 1042 moves[oldnode] = newnode
1042 1043
1043 1044 allnewnodes = [n for ns in replacements.values() for n in ns]
1044 1045 toretract = {}
1045 1046 toadvance = {}
1046 1047 if fixphase:
1047 1048 precursors = {}
1048 1049 for oldnodes, newnodes in replacements.items():
1049 1050 for oldnode in oldnodes:
1050 1051 for newnode in newnodes:
1051 1052 precursors.setdefault(newnode, []).append(oldnode)
1052 1053
1053 1054 allnewnodes.sort(key=lambda n: unfi[n].rev())
1054 1055 newphases = {}
1055 1056
1056 1057 def phase(ctx):
1057 1058 return newphases.get(ctx.node(), ctx.phase())
1058 1059
1059 1060 for newnode in allnewnodes:
1060 1061 ctx = unfi[newnode]
1061 1062 parentphase = max(phase(p) for p in ctx.parents())
1062 1063 if targetphase is None:
1063 1064 oldphase = max(
1064 1065 unfi[oldnode].phase() for oldnode in precursors[newnode]
1065 1066 )
1066 1067 newphase = max(oldphase, parentphase)
1067 1068 else:
1068 1069 newphase = max(targetphase, parentphase)
1069 1070 newphases[newnode] = newphase
1070 1071 if newphase > ctx.phase():
1071 1072 toretract.setdefault(newphase, []).append(newnode)
1072 1073 elif newphase < ctx.phase():
1073 1074 toadvance.setdefault(newphase, []).append(newnode)
1074 1075
1075 1076 with repo.transaction(b'cleanup') as tr:
1076 1077 # Move bookmarks
1077 1078 bmarks = repo._bookmarks
1078 1079 bmarkchanges = []
1079 1080 for oldnode, newnode in moves.items():
1080 1081 oldbmarks = repo.nodebookmarks(oldnode)
1081 1082 if not oldbmarks:
1082 1083 continue
1083 1084 from . import bookmarks # avoid import cycle
1084 1085
1085 1086 repo.ui.debug(
1086 1087 b'moving bookmarks %r from %s to %s\n'
1087 1088 % (
1088 1089 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1089 1090 hex(oldnode),
1090 1091 hex(newnode),
1091 1092 )
1092 1093 )
1093 1094 # Delete divergent bookmarks being parents of related newnodes
1094 1095 deleterevs = repo.revs(
1095 1096 b'parents(roots(%ln & (::%n))) - parents(%n)',
1096 1097 allnewnodes,
1097 1098 newnode,
1098 1099 oldnode,
1099 1100 )
1100 1101 deletenodes = _containsnode(repo, deleterevs)
1101 1102 for name in oldbmarks:
1102 1103 bmarkchanges.append((name, newnode))
1103 1104 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1104 1105 bmarkchanges.append((b, None))
1105 1106
1106 1107 if bmarkchanges:
1107 1108 bmarks.applychanges(repo, tr, bmarkchanges)
1108 1109
1109 1110 for phase, nodes in toretract.items():
1110 1111 phases.retractboundary(repo, tr, phase, nodes)
1111 1112 for phase, nodes in toadvance.items():
1112 1113 phases.advanceboundary(repo, tr, phase, nodes)
1113 1114
1114 1115 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1115 1116 # Obsolete or strip nodes
1116 1117 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1117 1118 # If a node is already obsoleted, and we want to obsolete it
1118 1119 # without a successor, skip that obsolete request since it's
1119 1120 # unnecessary. That's the "if s or not isobs(n)" check below.
1120 1121 # Also sort the nodes in topological order; that might be useful for
1121 1122 # some obsstore logic.
1122 1123 # NOTE: the sorting might belong to createmarkers.
1123 1124 torev = unfi.changelog.rev
1124 1125 sortfunc = lambda ns: torev(ns[0][0])
1125 1126 rels = []
1126 1127 for ns, s in sorted(replacements.items(), key=sortfunc):
1127 1128 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1128 1129 rels.append(rel)
1129 1130 if rels:
1130 1131 obsolete.createmarkers(
1131 1132 repo, rels, operation=operation, metadata=metadata
1132 1133 )
1133 1134 elif phases.supportinternal(repo) and mayusearchived:
1134 1135 # this assumes we do not have "unstable" nodes above the cleaned ones
1135 1136 allreplaced = set()
1136 1137 for ns in replacements.keys():
1137 1138 allreplaced.update(ns)
1138 1139 if backup:
1139 1140 from . import repair # avoid import cycle
1140 1141
1141 1142 node = min(allreplaced, key=repo.changelog.rev)
1142 1143 repair.backupbundle(
1143 1144 repo, allreplaced, allreplaced, node, operation
1144 1145 )
1145 1146 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1146 1147 else:
1147 1148 from . import repair # avoid import cycle
1148 1149
1149 1150 tostrip = list(n for ns in replacements for n in ns)
1150 1151 if tostrip:
1151 1152 repair.delayedstrip(
1152 1153 repo.ui, repo, tostrip, operation, backup=backup
1153 1154 )
1154 1155
1155 1156
1156 1157 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1157 1158 if opts is None:
1158 1159 opts = {}
1159 1160 m = matcher
1160 1161 dry_run = opts.get(b'dry_run')
1161 1162 try:
1162 1163 similarity = float(opts.get(b'similarity') or 0)
1163 1164 except ValueError:
1164 1165 raise error.Abort(_(b'similarity must be a number'))
1165 1166 if similarity < 0 or similarity > 100:
1166 1167 raise error.Abort(_(b'similarity must be between 0 and 100'))
1167 1168 similarity /= 100.0
1168 1169
1169 1170 ret = 0
1170 1171
1171 1172 wctx = repo[None]
1172 1173 for subpath in sorted(wctx.substate):
1173 1174 submatch = matchmod.subdirmatcher(subpath, m)
1174 1175 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1175 1176 sub = wctx.sub(subpath)
1176 1177 subprefix = repo.wvfs.reljoin(prefix, subpath)
1177 1178 subuipathfn = subdiruipathfn(subpath, uipathfn)
1178 1179 try:
1179 1180 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1180 1181 ret = 1
1181 1182 except error.LookupError:
1182 1183 repo.ui.status(
1183 1184 _(b"skipping missing subrepository: %s\n")
1184 1185 % uipathfn(subpath)
1185 1186 )
1186 1187
1187 1188 rejected = []
1188 1189
1189 1190 def badfn(f, msg):
1190 1191 if f in m.files():
1191 1192 m.bad(f, msg)
1192 1193 rejected.append(f)
1193 1194
1194 1195 badmatch = matchmod.badmatch(m, badfn)
1195 1196 added, unknown, deleted, removed, forgotten = _interestingfiles(
1196 1197 repo, badmatch
1197 1198 )
1198 1199
1199 1200 unknownset = set(unknown + forgotten)
1200 1201 toprint = unknownset.copy()
1201 1202 toprint.update(deleted)
1202 1203 for abs in sorted(toprint):
1203 1204 if repo.ui.verbose or not m.exact(abs):
1204 1205 if abs in unknownset:
1205 1206 status = _(b'adding %s\n') % uipathfn(abs)
1206 1207 label = b'ui.addremove.added'
1207 1208 else:
1208 1209 status = _(b'removing %s\n') % uipathfn(abs)
1209 1210 label = b'ui.addremove.removed'
1210 1211 repo.ui.status(status, label=label)
1211 1212
1212 1213 renames = _findrenames(
1213 1214 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1214 1215 )
1215 1216
1216 1217 if not dry_run:
1217 1218 _markchanges(repo, unknown + forgotten, deleted, renames)
1218 1219
1219 1220 for f in rejected:
1220 1221 if f in m.files():
1221 1222 return 1
1222 1223 return ret
1223 1224
1224 1225
1225 1226 def marktouched(repo, files, similarity=0.0):
1226 1227 '''Assert that files have somehow been operated upon. files are relative to
1227 1228 the repo root.'''
1228 1229 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1229 1230 rejected = []
1230 1231
1231 1232 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1232 1233
1233 1234 if repo.ui.verbose:
1234 1235 unknownset = set(unknown + forgotten)
1235 1236 toprint = unknownset.copy()
1236 1237 toprint.update(deleted)
1237 1238 for abs in sorted(toprint):
1238 1239 if abs in unknownset:
1239 1240 status = _(b'adding %s\n') % abs
1240 1241 else:
1241 1242 status = _(b'removing %s\n') % abs
1242 1243 repo.ui.status(status)
1243 1244
1244 1245 # TODO: We should probably have the caller pass in uipathfn and apply it to
1245 1246 # the messages above too. legacyrelativevalue=True is consistent with how
1246 1247 # it used to work.
1247 1248 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1248 1249 renames = _findrenames(
1249 1250 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1250 1251 )
1251 1252
1252 1253 _markchanges(repo, unknown + forgotten, deleted, renames)
1253 1254
1254 1255 for f in rejected:
1255 1256 if f in m.files():
1256 1257 return 1
1257 1258 return 0
1258 1259
1259 1260
1260 1261 def _interestingfiles(repo, matcher):
1261 1262 '''Walk dirstate with matcher, looking for files that addremove would care
1262 1263 about.
1263 1264
1264 1265 This is different from dirstate.status because it doesn't care about
1265 1266 whether files are modified or clean.'''
1266 1267 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1267 1268 audit_path = pathutil.pathauditor(repo.root, cached=True)
1268 1269
1269 1270 ctx = repo[None]
1270 1271 dirstate = repo.dirstate
1271 1272 matcher = repo.narrowmatch(matcher, includeexact=True)
1272 1273 walkresults = dirstate.walk(
1273 1274 matcher,
1274 1275 subrepos=sorted(ctx.substate),
1275 1276 unknown=True,
1276 1277 ignored=False,
1277 1278 full=False,
1278 1279 )
1279 1280 for abs, st in pycompat.iteritems(walkresults):
1280 1281 dstate = dirstate[abs]
1281 1282 if dstate == b'?' and audit_path.check(abs):
1282 1283 unknown.append(abs)
1283 1284 elif dstate != b'r' and not st:
1284 1285 deleted.append(abs)
1285 1286 elif dstate == b'r' and st:
1286 1287 forgotten.append(abs)
1287 1288 # for finding renames
1288 1289 elif dstate == b'r' and not st:
1289 1290 removed.append(abs)
1290 1291 elif dstate == b'a':
1291 1292 added.append(abs)
1292 1293
1293 1294 return added, unknown, deleted, removed, forgotten
1294 1295
1295 1296
1296 1297 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1297 1298 '''Find renames from removed files to added ones.'''
1298 1299 renames = {}
1299 1300 if similarity > 0:
1300 1301 for old, new, score in similar.findrenames(
1301 1302 repo, added, removed, similarity
1302 1303 ):
1303 1304 if (
1304 1305 repo.ui.verbose
1305 1306 or not matcher.exact(old)
1306 1307 or not matcher.exact(new)
1307 1308 ):
1308 1309 repo.ui.status(
1309 1310 _(
1310 1311 b'recording removal of %s as rename to %s '
1311 1312 b'(%d%% similar)\n'
1312 1313 )
1313 1314 % (uipathfn(old), uipathfn(new), score * 100)
1314 1315 )
1315 1316 renames[new] = old
1316 1317 return renames
1317 1318
1318 1319
1319 1320 def _markchanges(repo, unknown, deleted, renames):
1320 1321 '''Marks the files in unknown as added, the files in deleted as removed,
1321 1322 and the files in renames as copied.'''
1322 1323 wctx = repo[None]
1323 1324 with repo.wlock():
1324 1325 wctx.forget(deleted)
1325 1326 wctx.add(unknown)
1326 1327 for new, old in pycompat.iteritems(renames):
1327 1328 wctx.copy(old, new)
1328 1329
1329 1330
1330 1331 def getrenamedfn(repo, endrev=None):
1331 1332 if copiesmod.usechangesetcentricalgo(repo):
1332 1333
1333 1334 def getrenamed(fn, rev):
1334 1335 ctx = repo[rev]
1335 1336 p1copies = ctx.p1copies()
1336 1337 if fn in p1copies:
1337 1338 return p1copies[fn]
1338 1339 p2copies = ctx.p2copies()
1339 1340 if fn in p2copies:
1340 1341 return p2copies[fn]
1341 1342 return None
1342 1343
1343 1344 return getrenamed
1344 1345
1345 1346 rcache = {}
1346 1347 if endrev is None:
1347 1348 endrev = len(repo)
1348 1349
1349 1350 def getrenamed(fn, rev):
1350 1351 '''looks up all renames for a file (up to endrev) the first
1351 1352 time the file is given. It indexes on the changerev and only
1352 1353 parses the manifest if linkrev != changerev.
1353 1354 Returns rename info for fn at changerev rev.'''
1354 1355 if fn not in rcache:
1355 1356 rcache[fn] = {}
1356 1357 fl = repo.file(fn)
1357 1358 for i in fl:
1358 1359 lr = fl.linkrev(i)
1359 1360 renamed = fl.renamed(fl.node(i))
1360 1361 rcache[fn][lr] = renamed and renamed[0]
1361 1362 if lr >= endrev:
1362 1363 break
1363 1364 if rev in rcache[fn]:
1364 1365 return rcache[fn][rev]
1365 1366
1366 1367 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1367 1368 # filectx logic.
1368 1369 try:
1369 1370 return repo[rev][fn].copysource()
1370 1371 except error.LookupError:
1371 1372 return None
1372 1373
1373 1374 return getrenamed
1374 1375
1375 1376
1376 1377 def getcopiesfn(repo, endrev=None):
1377 1378 if copiesmod.usechangesetcentricalgo(repo):
1378 1379
1379 1380 def copiesfn(ctx):
1380 1381 if ctx.p2copies():
1381 1382 allcopies = ctx.p1copies().copy()
1382 1383 # There should be no overlap
1383 1384 allcopies.update(ctx.p2copies())
1384 1385 return sorted(allcopies.items())
1385 1386 else:
1386 1387 return sorted(ctx.p1copies().items())
1387 1388
1388 1389 else:
1389 1390 getrenamed = getrenamedfn(repo, endrev)
1390 1391
1391 1392 def copiesfn(ctx):
1392 1393 copies = []
1393 1394 for fn in ctx.files():
1394 1395 rename = getrenamed(fn, ctx.rev())
1395 1396 if rename:
1396 1397 copies.append((fn, rename))
1397 1398 return copies
1398 1399
1399 1400 return copiesfn
1400 1401
1401 1402
1402 1403 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1403 1404 """Update the dirstate to reflect the intent of copying src to dst. For
1404 1405 different reasons it might not end with dst being marked as copied from src.
1405 1406 """
1406 1407 origsrc = repo.dirstate.copied(src) or src
1407 1408 if dst == origsrc: # copying back a copy?
1408 1409 if repo.dirstate[dst] not in b'mn' and not dryrun:
1409 1410 repo.dirstate.normallookup(dst)
1410 1411 else:
1411 1412 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1412 1413 if not ui.quiet:
1413 1414 ui.warn(
1414 1415 _(
1415 1416 b"%s has not been committed yet, so no copy "
1416 1417 b"data will be stored for %s.\n"
1417 1418 )
1418 1419 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1419 1420 )
1420 1421 if repo.dirstate[dst] in b'?r' and not dryrun:
1421 1422 wctx.add([dst])
1422 1423 elif not dryrun:
1423 1424 wctx.copy(origsrc, dst)
1424 1425
1425 1426
1426 1427 def movedirstate(repo, newctx, match=None):
1427 1428 """Move the dirstate to newctx and adjust it as necessary.
1428 1429
1429 1430 A matcher can be provided as an optimization. It is probably a bug to pass
1430 1431 a matcher that doesn't match all the differences between the parent of the
1431 1432 working copy and newctx.
1432 1433 """
1433 1434 oldctx = repo[b'.']
1434 1435 ds = repo.dirstate
1435 1436 copies = dict(ds.copies())
1436 1437 ds.setparents(newctx.node(), nullid)
1437 1438 s = newctx.status(oldctx, match=match)
1438 1439 for f in s.modified:
1439 1440 if ds[f] == b'r':
1440 1441 # modified + removed -> removed
1441 1442 continue
1442 1443 ds.normallookup(f)
1443 1444
1444 1445 for f in s.added:
1445 1446 if ds[f] == b'r':
1446 1447 # added + removed -> unknown
1447 1448 ds.drop(f)
1448 1449 elif ds[f] != b'a':
1449 1450 ds.add(f)
1450 1451
1451 1452 for f in s.removed:
1452 1453 if ds[f] == b'a':
1453 1454 # removed + added -> normal
1454 1455 ds.normallookup(f)
1455 1456 elif ds[f] != b'r':
1456 1457 ds.remove(f)
1457 1458
1458 1459 # Merge old parent and old working dir copies
1459 1460 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1460 1461 oldcopies.update(copies)
1461 1462 copies = {
1462 1463 dst: oldcopies.get(src, src)
1463 1464 for dst, src in pycompat.iteritems(oldcopies)
1464 1465 }
1465 1466 # Adjust the dirstate copies
1466 1467 for dst, src in pycompat.iteritems(copies):
1467 1468 if src not in newctx or dst in newctx or ds[dst] != b'a':
1468 1469 src = None
1469 1470 ds.copy(src, dst)
1470 1471 repo._quick_access_changeid_invalidate()
1471 1472
1472 1473
1474 def filterrequirements(requirements):
1475 """ filters the requirements into two sets:
1476
1477 wcreq: requirements which should be written in .hg/requires
1478 storereq: requirements which should be written in .hg/store/requires
1479
1480 Returns (wcreq, storereq)
1481 """
1482 if False:  # splitting is disabled for now; everything stays in .hg/requires
1483 wc, store = set(), set()
1484 for r in requirements:
1485 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1486 wc.add(r)
1487 else:
1488 store.add(r)
1489 return wc, store
1490 return requirements, None
1491
1492
1473 1493 def writereporequirements(repo, requirements=None):
1474 1494 """ writes requirements for the repo to .hg/requires """
1475 1495 if requirements:
1476 1496 repo.requirements = requirements
1477 writerequires(repo.vfs, repo.requirements)
1497 wcreq, storereq = filterrequirements(repo.requirements)
1498 if wcreq is not None:
1499 writerequires(repo.vfs, wcreq)
1500 if storereq is not None:
1501 writerequires(repo.svfs, storereq)
1478 1502
1479 1503
1480 1504 def writerequires(opener, requirements):
1481 1505 with opener(b'requires', b'w', atomictemp=True) as fp:
1482 1506 for r in sorted(requirements):
1483 1507 fp.write(b"%s\n" % r)
1484 1508
1485 1509
1486 1510 class filecachesubentry(object):
1487 1511 def __init__(self, path, stat):
1488 1512 self.path = path
1489 1513 self.cachestat = None
1490 1514 self._cacheable = None
1491 1515
1492 1516 if stat:
1493 1517 self.cachestat = filecachesubentry.stat(self.path)
1494 1518
1495 1519 if self.cachestat:
1496 1520 self._cacheable = self.cachestat.cacheable()
1497 1521 else:
1498 1522 # None means we don't know yet
1499 1523 self._cacheable = None
1500 1524
1501 1525 def refresh(self):
1502 1526 if self.cacheable():
1503 1527 self.cachestat = filecachesubentry.stat(self.path)
1504 1528
1505 1529 def cacheable(self):
1506 1530 if self._cacheable is not None:
1507 1531 return self._cacheable
1508 1532
1509 1533 # we don't know yet, assume it is for now
1510 1534 return True
1511 1535
1512 1536 def changed(self):
1513 1537 # no point in going further if we can't cache it
1514 1538 if not self.cacheable():
1515 1539 return True
1516 1540
1517 1541 newstat = filecachesubentry.stat(self.path)
1518 1542
1519 1543 # we may not know if it's cacheable yet, check again now
1520 1544 if newstat and self._cacheable is None:
1521 1545 self._cacheable = newstat.cacheable()
1522 1546
1523 1547 # check again
1524 1548 if not self._cacheable:
1525 1549 return True
1526 1550
1527 1551 if self.cachestat != newstat:
1528 1552 self.cachestat = newstat
1529 1553 return True
1530 1554 else:
1531 1555 return False
1532 1556
1533 1557 @staticmethod
1534 1558 def stat(path):
1535 1559 try:
1536 1560 return util.cachestat(path)
1537 1561 except OSError as e:
1538 1562 if e.errno != errno.ENOENT:
1539 1563 raise
1540 1564
1541 1565
1542 1566 class filecacheentry(object):
1543 1567 def __init__(self, paths, stat=True):
1544 1568 self._entries = []
1545 1569 for path in paths:
1546 1570 self._entries.append(filecachesubentry(path, stat))
1547 1571
1548 1572 def changed(self):
1549 1573 '''true if any entry has changed'''
1550 1574 for entry in self._entries:
1551 1575 if entry.changed():
1552 1576 return True
1553 1577 return False
1554 1578
1555 1579 def refresh(self):
1556 1580 for entry in self._entries:
1557 1581 entry.refresh()
1558 1582
1559 1583
1560 1584 class filecache(object):
1561 1585 """A property like decorator that tracks files under .hg/ for updates.
1562 1586
1563 1587 On first access, the files defined as arguments are stat()ed and the
1564 1588 results cached. The decorated function is called. The results are stashed
1565 1589 away in a ``_filecache`` dict on the object whose method is decorated.
1566 1590
1567 1591 On subsequent access, the cached result is used as it is set to the
1568 1592 instance dictionary.
1569 1593
1570 1594 On external property set/delete operations, the caller must update the
1571 1595 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1572 1596 instead of directly setting <attr>.
1573 1597
1574 1598 When using the property API, the cached data is always used if available.
1575 1599 No stat() is performed to check if the file has changed.
1576 1600
1577 1601 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1578 1602 can populate an entry before the property's getter is called. In this case,
1579 1603 entries in ``_filecache`` will be used during property operations,
1580 1604 if available. If the underlying file changes, it is up to external callers
1581 1605 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1582 1606 method result as well as possibly calling ``del obj._filecache[attr]`` to
1583 1607 remove the ``filecacheentry``.
1584 1608 """
1585 1609
1586 1610 def __init__(self, *paths):
1587 1611 self.paths = paths
1588 1612
1589 1613 def join(self, obj, fname):
1590 1614 """Used to compute the runtime path of a cached file.
1591 1615
1592 1616 Users should subclass filecache and provide their own version of this
1593 1617 function to call the appropriate join function on 'obj' (an instance
1594 1618 of the class whose member function was decorated).
1595 1619 """
1596 1620 raise NotImplementedError
1597 1621
1598 1622 def __call__(self, func):
1599 1623 self.func = func
1600 1624 self.sname = func.__name__
1601 1625 self.name = pycompat.sysbytes(self.sname)
1602 1626 return self
1603 1627
1604 1628 def __get__(self, obj, type=None):
1605 1629 # if accessed on the class, return the descriptor itself.
1606 1630 if obj is None:
1607 1631 return self
1608 1632
1609 1633 assert self.sname not in obj.__dict__
1610 1634
1611 1635 entry = obj._filecache.get(self.name)
1612 1636
1613 1637 if entry:
1614 1638 if entry.changed():
1615 1639 entry.obj = self.func(obj)
1616 1640 else:
1617 1641 paths = [self.join(obj, path) for path in self.paths]
1618 1642
1619 1643 # We stat -before- creating the object so our cache doesn't lie if
1620 1644 # a writer modified between the time we read and stat
1621 1645 entry = filecacheentry(paths, True)
1622 1646 entry.obj = self.func(obj)
1623 1647
1624 1648 obj._filecache[self.name] = entry
1625 1649
1626 1650 obj.__dict__[self.sname] = entry.obj
1627 1651 return entry.obj
1628 1652
1629 1653 # don't implement __set__(), which would make __dict__ lookup as slow as
1630 1654 # function call.
1631 1655
1632 1656 def set(self, obj, value):
1633 1657 if self.name not in obj._filecache:
1634 1658 # we add an entry for the missing value because X in __dict__
1635 1659 # implies X in _filecache
1636 1660 paths = [self.join(obj, path) for path in self.paths]
1637 1661 ce = filecacheentry(paths, False)
1638 1662 obj._filecache[self.name] = ce
1639 1663 else:
1640 1664 ce = obj._filecache[self.name]
1641 1665
1642 1666 ce.obj = value # update cached copy
1643 1667 obj.__dict__[self.sname] = value # update copy returned by obj.x
1644 1668
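
# A minimal usage sketch for the filecache decorator (hypothetical class
# names, not part of this module): subclass it to provide join(), then
# decorate a method on an object that maintains a ``_filecache`` dict.
# First access stats the file and computes the value; later accesses hit
# the instance dict until the entry is invalidated.


class _examplefilecache(filecache):
    def join(self, obj, fname):
        # assumes ``obj`` exposes a bytes base path as ``obj.path``
        return os.path.join(obj.path, fname)


class _examplecachedobj(object):
    def __init__(self, path):
        self.path = path
        self._filecache = {}

    @_examplefilecache(b'requires')
    def requires(self):
        # recomputed only when <path>/requires changes on disk
        return set()
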

def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data

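
# A usage sketch for extdatasource (hypothetical ``[extdata]`` entry): with
# hgrc configuration such as ``bugzilla = shell:cat .hg/bugzilla-map``, where
# each output line is "<revspec> <value>", the parsed mapping is rev -> value.


def _extdatasource_example(repo):
    data = extdatasource(repo, b"bugzilla")  # {rev: b'<value>', ...}
    for rev, value in sorted(data.items()):
        repo.ui.write(b"%d: %s\n" % (rev, value))
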

def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            b'lock can only be inherited while held'
        )
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)


def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(
        repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
    )

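
# A usage sketch (hypothetical scenario): run a child command that may
# re-acquire this process's wlock; the lock must already be held.


def _wlocksub_example(repo):
    with repo.wlock():
        return wlocksub(repo, b'hg debuglocks')
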

class progress(object):
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))

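
# A usage sketch for the progress API above: ui.makeprogress() (defined on
# the ui object) returns an instance of this class wired to the terminal
# progress bar, so callers normally go through it rather than constructing
# progress directly.


def _progress_example(ui, items):
    # ``items`` are expected to be bytes strings
    with ui.makeprogress(b'example', unit=b'items', total=len(items)) as p:
        for item in items:
            p.increment(item=item)
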

def gdinitconfig(ui):
    """helper function to know if a repo should be created as a general delta
    repo"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))

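
# A round-trip sketch for simplekeyvaluefile (hypothetical state file name
# under .hg/): write a mapping with a free-form first line, then read it
# back with the first line exposed under the __firstline key.


def _simplekeyvaluefile_example(repo):
    f = simplekeyvaluefile(repo.vfs, b'example-state')
    f.write({b'version': b'1', b'state': b'done'}, firstline=b'v1')
    data = f.read(firstlinenonkeyval=True)
    # data == {b'__firstline': b'v1', b'version': b'1', b'state': b'done'}
    return data
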

_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revmatches) prefetch functions
fileprefetchhooks = util.hooks()

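
# A registration sketch (hypothetical extension callback): each hook added
# to fileprefetchhooks is invoked with (repo, revmatches) as wired above.


def _registerprefetchexample():
    def prefetch(repo, revmatches):
        for rev, match in revmatches:
            pass  # e.g. warm a local cache for files matched at ``rev``

    fileprefetchhooks.add(b'example', prefetch)

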
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True


def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
    """register a callback to issue a summary after the transaction is closed

    If as_validator is true, then the callbacks are registered as transaction
    validators instead.
    """

    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to trouble. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())

        def wrapped(tr):
            repo = reporef()
            if filtername:
                assert repo is not None  # help pytype
                repo = repo.filtered(filtername)
            func(repo, tr)

        newcat = b'%02i-txnreport' % len(categories)
        if as_validator:
            otr.addvalidator(newcat, wrapped)
        else:
            otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    @reportsummary
    def reportchangegroup(repo, tr):
        cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
        cgfiles = tr.changes.get(b'changegroup-count-files', 0)
        cgheads = tr.changes.get(b'changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = b""
            if cgheads:
                htext = _(b" (%+d heads)") % cgheads
            msg = _(b"added %d changesets with %d changes to %d files%s\n")
            if as_validator:
                msg = _(b"adding %d changesets with %d changes to %d files%s\n")
            assert repo is not None  # help pytype
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):

        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get(b'obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                msg = _(b'obsoleted %i changesets\n')
                if as_validator:
                    msg = _(b'obsoleting %i changesets\n')
                repo.ui.status(msg % len(obsoleted))

    if obsolete.isenabled(
        repo, obsolete.createmarkersopt
    ) and repo.ui.configbool(
        b'experimental', b'evolution.report-instabilities'
    ):
        instabilitytypes = [
            (b'orphan', b'orphan'),
            (b'phase-divergent', b'phasedivergent'),
            (b'content-divergent', b'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(
                    set(obsolete.getrevs(repo, revset)) - filtered
                )
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)

        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (
                    newinstabilitycounts[instability]
                    - oldinstabilitycounts[instability]
                )
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets that
            pre-existed the pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))

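
# A wiring sketch (hypothetical call site): code that opens a pull-like
# transaction can request these end-of-transaction summaries on it.


def _summarycallback_example(repo, tr):
    registersummarycallback(repo, tr, txnname=b'pull')
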

def getinstabilitymessage(delta, instability):
    """function to return the warning message to show about new instabilities

    exists as a separate function so that extensions can wrap it to show more
    information, like how to fix instabilities"""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


def enforcesinglehead(repo, tr, desc, accountclosed=False):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(b'visible')
    # possible improvement: we could restrict the check to affected branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filter name to separate the branch/tags caches
    # until we can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)

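
# A usage sketch (hypothetical caller): unhide user-specified hidden
# changesets with a warning before resolving the revisions; revrange()
# is defined earlier in this module.


def _directaccess_example(repo, specs):
    repo = unhidehashlikerevs(repo, specs, b'warn')
    return revrange(repo, specs)
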

def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )
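

# A usage sketch: print the revisions selected for a bookmark named ``@``
# (any bookmark name works).


def _bookmarkrevs_example(repo):
    for rev in bookmarkrevs(repo, b'@'):
        repo.ui.write(b'%d\n' % rev)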