branchmap: add a cache validation cache, avoid expensive re-hash on every use...
Kyle Lippincott
r46088:89f0d9f8 default
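The change itself is compact: `changelog.filteredrevs` becomes a property whose setter clears a new per-instance `_filteredrevs_hashcache` dict, and `scmutil.filteredhash()` consults that dict (keyed by `maxrev`) before recomputing the SHA-1 over the filtered revision set. A minimal, self-contained sketch of the same memoization pattern, with simplified names (`FilteredRevs` is illustrative, not Mercurial API); the real hunks follow below:

import hashlib


class FilteredRevs(object):
    def __init__(self):
        self._revs = frozenset()
        self._hashcache = {}  # maxrev -> digest, cleared on every update

    @property
    def revs(self):
        return self._revs

    @revs.setter
    def revs(self, val):
        # funnel all updates through here so the cache can never go stale
        assert isinstance(val, frozenset)
        self._revs = val
        self._hashcache = {}

    def digest(self, maxrev):
        key = self._hashcache.get(maxrev)
        if key is None:
            s = hashlib.sha1()
            for rev in sorted(r for r in self._revs if r <= maxrev):
                s.update(b'%d;' % rev)
            key = s.digest()
            self._hashcache[maxrev] = key
        return key


fr = FilteredRevs()
fr.revs = frozenset({2, 5, 9})
assert fr.digest(10) == fr.digest(10)  # second call is a pure dict lookup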
@@ -1,585 +1,597 @@
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .node import (
12 12 bin,
13 13 hex,
14 14 nullid,
15 15 )
16 16 from .thirdparty import attr
17 17
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 metadata,
22 22 pycompat,
23 23 revlog,
24 24 )
25 25 from .utils import (
26 26 dateutil,
27 27 stringutil,
28 28 )
29 29
30 30 from .revlogutils import sidedata as sidedatamod
31 31
32 32 _defaultextra = {b'branch': b'default'}
33 33
34 34
35 35 def _string_escape(text):
36 36 """
37 37 >>> from .pycompat import bytechr as chr
38 38 >>> d = {b'nl': chr(10), b'bs': chr(92), b'cr': chr(13), b'nul': chr(0)}
39 39 >>> s = b"ab%(nl)scd%(bs)s%(bs)sn%(nul)s12ab%(cr)scd%(bs)s%(nl)s" % d
40 40 >>> s
41 41 'ab\\ncd\\\\\\\\n\\x0012ab\\rcd\\\\\\n'
42 42 >>> res = _string_escape(s)
43 43 >>> s == _string_unescape(res)
44 44 True
45 45 """
46 46 # subset of the string_escape codec
47 47 text = (
48 48 text.replace(b'\\', b'\\\\')
49 49 .replace(b'\n', b'\\n')
50 50 .replace(b'\r', b'\\r')
51 51 )
52 52 return text.replace(b'\0', b'\\0')
53 53
54 54
55 55 def _string_unescape(text):
56 56 if b'\\0' in text:
57 57 # fix up \0 without getting into trouble with \\0
58 58 text = text.replace(b'\\\\', b'\\\\\n')
59 59 text = text.replace(b'\\0', b'\0')
60 60 text = text.replace(b'\n', b'')
61 61 return stringutil.unescapestr(text)
62 62
63 63
64 64 def decodeextra(text):
65 65 """
66 66 >>> from .pycompat import bytechr as chr
67 67 >>> sorted(decodeextra(encodeextra({b'foo': b'bar', b'baz': chr(0) + b'2'})
68 68 ... ).items())
69 69 [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
70 70 >>> sorted(decodeextra(encodeextra({b'foo': b'bar',
71 71 ... b'baz': chr(92) + chr(0) + b'2'})
72 72 ... ).items())
73 73 [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
74 74 """
75 75 extra = _defaultextra.copy()
76 76 for l in text.split(b'\0'):
77 77 if l:
78 78 k, v = _string_unescape(l).split(b':', 1)
79 79 extra[k] = v
80 80 return extra
81 81
82 82
83 83 def encodeextra(d):
84 84 # keys must be sorted to produce a deterministic changelog entry
85 85 items = [_string_escape(b'%s:%s' % (k, d[k])) for k in sorted(d)]
86 86 return b"\0".join(items)
87 87
88 88
89 89 def stripdesc(desc):
90 90 """strip trailing whitespace and leading and trailing empty lines"""
91 91 return b'\n'.join([l.rstrip() for l in desc.splitlines()]).strip(b'\n')
92 92
93 93
94 94 class appender(object):
95 95 '''the changelog index must be updated last on disk, so we use this class
96 96 to delay writes to it'''
97 97
98 98 def __init__(self, vfs, name, mode, buf):
99 99 self.data = buf
100 100 fp = vfs(name, mode)
101 101 self.fp = fp
102 102 self.offset = fp.tell()
103 103 self.size = vfs.fstat(fp).st_size
104 104 self._end = self.size
105 105
106 106 def end(self):
107 107 return self._end
108 108
109 109 def tell(self):
110 110 return self.offset
111 111
112 112 def flush(self):
113 113 pass
114 114
115 115 @property
116 116 def closed(self):
117 117 return self.fp.closed
118 118
119 119 def close(self):
120 120 self.fp.close()
121 121
122 122 def seek(self, offset, whence=0):
123 123 '''virtual file offset spans real file and data'''
124 124 if whence == 0:
125 125 self.offset = offset
126 126 elif whence == 1:
127 127 self.offset += offset
128 128 elif whence == 2:
129 129 self.offset = self.end() + offset
130 130 if self.offset < self.size:
131 131 self.fp.seek(self.offset)
132 132
133 133 def read(self, count=-1):
134 134 '''only trick here is reads that span real file and data'''
135 135 ret = b""
136 136 if self.offset < self.size:
137 137 s = self.fp.read(count)
138 138 ret = s
139 139 self.offset += len(s)
140 140 if count > 0:
141 141 count -= len(s)
142 142 if count != 0:
143 143 doff = self.offset - self.size
144 144 self.data.insert(0, b"".join(self.data))
145 145 del self.data[1:]
146 146 s = self.data[0][doff : doff + count]
147 147 self.offset += len(s)
148 148 ret += s
149 149 return ret
150 150
151 151 def write(self, s):
152 152 self.data.append(bytes(s))
153 153 self.offset += len(s)
154 154 self._end += len(s)
155 155
156 156 def __enter__(self):
157 157 self.fp.__enter__()
158 158 return self
159 159
160 160 def __exit__(self, *args):
161 161 return self.fp.__exit__(*args)
162 162
163 163
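`appender.read()` above notes that its only trick is handling reads that span the already-flushed on-disk bytes and the pending in-memory chunks. A hedged, standalone toy showing just that seam (`SpanningReader` is hypothetical, not Mercurial API):

import io


class SpanningReader(object):
    def __init__(self, fp, size, pending):
        self.fp = fp            # real, already-flushed data
        self.size = size        # length of the on-disk portion
        self.pending = pending  # byte chunks not yet written to disk
        self.offset = 0         # virtual offset across both regions

    def read(self, count=-1):
        ret = b""
        if self.offset < self.size:
            ret = self.fp.read(count)
            self.offset += len(ret)
            if count > 0:
                count -= len(ret)
        if count != 0:
            # continue past EOF of the real file into the pending buffer
            doff = self.offset - self.size
            buf = b"".join(self.pending)
            s = buf[doff : doff + count] if count > 0 else buf[doff:]
            self.offset += len(s)
            ret += s
        return ret


r = SpanningReader(io.BytesIO(b"on-disk"), 7, [b"pend", b"ing"])
assert r.read() == b"on-diskpending"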
164 164 class _divertopener(object):
165 165 def __init__(self, opener, target):
166 166 self._opener = opener
167 167 self._target = target
168 168
169 169 def __call__(self, name, mode=b'r', checkambig=False, **kwargs):
170 170 if name != self._target:
171 171 return self._opener(name, mode, **kwargs)
172 172 return self._opener(name + b".a", mode, **kwargs)
173 173
174 174 def __getattr__(self, attr):
175 175 return getattr(self._opener, attr)
176 176
177 177
178 178 def _delayopener(opener, target, buf):
179 179 """build an opener that stores chunks in 'buf' instead of 'target'"""
180 180
181 181 def _delay(name, mode=b'r', checkambig=False, **kwargs):
182 182 if name != target:
183 183 return opener(name, mode, **kwargs)
184 184 assert not kwargs
185 185 return appender(opener, name, mode, buf)
186 186
187 187 return _delay
188 188
189 189
190 190 @attr.s
191 191 class _changelogrevision(object):
192 192 # Extensions might modify _defaultextra, so let the constructor below pass
193 193 # it in
194 194 extra = attr.ib()
195 195 manifest = attr.ib(default=nullid)
196 196 user = attr.ib(default=b'')
197 197 date = attr.ib(default=(0, 0))
198 198 files = attr.ib(default=attr.Factory(list))
199 199 filesadded = attr.ib(default=None)
200 200 filesremoved = attr.ib(default=None)
201 201 p1copies = attr.ib(default=None)
202 202 p2copies = attr.ib(default=None)
203 203 description = attr.ib(default=b'')
204 204
205 205
206 206 class changelogrevision(object):
207 207 """Holds results of a parsed changelog revision.
208 208
209 209 Changelog revisions consist of multiple pieces of data, including
210 210 the manifest node, user, and date. This object exposes a view into
211 211 the parsed object.
212 212 """
213 213
214 214 __slots__ = (
215 215 '_offsets',
216 216 '_text',
217 217 '_sidedata',
218 218 '_cpsd',
219 219 )
220 220
221 221 def __new__(cls, text, sidedata, cpsd):
222 222 if not text:
223 223 return _changelogrevision(extra=_defaultextra)
224 224
225 225 self = super(changelogrevision, cls).__new__(cls)
226 226 # We could return here and implement the following as an __init__.
227 227 # But doing it here is equivalent and saves an extra function call.
228 228
229 229 # format used:
230 230 # nodeid\n : manifest node in ascii
231 231 # user\n : user, no \n or \r allowed
232 232 # time tz extra\n : date (time is int or float, timezone is int)
233 233 # : extra is metadata, encoded and separated by '\0'
234 234 # : older versions ignore it
235 235 # files\n\n : files modified by the cset, no \n or \r allowed
236 236 # (.*) : comment (free text, ideally utf-8)
237 237 #
238 238 # changelog v0 doesn't use extra
239 239
240 240 nl1 = text.index(b'\n')
241 241 nl2 = text.index(b'\n', nl1 + 1)
242 242 nl3 = text.index(b'\n', nl2 + 1)
243 243
244 244 # The list of files may be empty, in which case nl3 is the first of the
245 245 # double newline that precedes the description.
246 246 if text[nl3 + 1 : nl3 + 2] == b'\n':
247 247 doublenl = nl3
248 248 else:
249 249 doublenl = text.index(b'\n\n', nl3 + 1)
250 250
251 251 self._offsets = (nl1, nl2, nl3, doublenl)
252 252 self._text = text
253 253 self._sidedata = sidedata
254 254 self._cpsd = cpsd
255 255
256 256 return self
257 257
258 258 @property
259 259 def manifest(self):
260 260 return bin(self._text[0 : self._offsets[0]])
261 261
262 262 @property
263 263 def user(self):
264 264 off = self._offsets
265 265 return encoding.tolocal(self._text[off[0] + 1 : off[1]])
266 266
267 267 @property
268 268 def _rawdate(self):
269 269 off = self._offsets
270 270 dateextra = self._text[off[1] + 1 : off[2]]
271 271 return dateextra.split(b' ', 2)[0:2]
272 272
273 273 @property
274 274 def _rawextra(self):
275 275 off = self._offsets
276 276 dateextra = self._text[off[1] + 1 : off[2]]
277 277 fields = dateextra.split(b' ', 2)
278 278 if len(fields) != 3:
279 279 return None
280 280
281 281 return fields[2]
282 282
283 283 @property
284 284 def date(self):
285 285 raw = self._rawdate
286 286 time = float(raw[0])
287 287 # Various tools did silly things with the timezone.
288 288 try:
289 289 timezone = int(raw[1])
290 290 except ValueError:
291 291 timezone = 0
292 292
293 293 return time, timezone
294 294
295 295 @property
296 296 def extra(self):
297 297 raw = self._rawextra
298 298 if raw is None:
299 299 return _defaultextra
300 300
301 301 return decodeextra(raw)
302 302
303 303 @property
304 304 def files(self):
305 305 off = self._offsets
306 306 if off[2] == off[3]:
307 307 return []
308 308
309 309 return self._text[off[2] + 1 : off[3]].split(b'\n')
310 310
311 311 @property
312 312 def filesadded(self):
313 313 if self._cpsd:
314 314 rawindices = self._sidedata.get(sidedatamod.SD_FILESADDED)
315 315 if not rawindices:
316 316 return []
317 317 else:
318 318 rawindices = self.extra.get(b'filesadded')
319 319 if rawindices is None:
320 320 return None
321 321 return metadata.decodefileindices(self.files, rawindices)
322 322
323 323 @property
324 324 def filesremoved(self):
325 325 if self._cpsd:
326 326 rawindices = self._sidedata.get(sidedatamod.SD_FILESREMOVED)
327 327 if not rawindices:
328 328 return []
329 329 else:
330 330 rawindices = self.extra.get(b'filesremoved')
331 331 if rawindices is None:
332 332 return None
333 333 return metadata.decodefileindices(self.files, rawindices)
334 334
335 335 @property
336 336 def p1copies(self):
337 337 if self._cpsd:
338 338 rawcopies = self._sidedata.get(sidedatamod.SD_P1COPIES)
339 339 if not rawcopies:
340 340 return {}
341 341 else:
342 342 rawcopies = self.extra.get(b'p1copies')
343 343 if rawcopies is None:
344 344 return None
345 345 return metadata.decodecopies(self.files, rawcopies)
346 346
347 347 @property
348 348 def p2copies(self):
349 349 if self._cpsd:
350 350 rawcopies = self._sidedata.get(sidedatamod.SD_P2COPIES)
351 351 if not rawcopies:
352 352 return {}
353 353 else:
354 354 rawcopies = self.extra.get(b'p2copies')
355 355 if rawcopies is None:
356 356 return None
357 357 return metadata.decodecopies(self.files, rawcopies)
358 358
359 359 @property
360 360 def description(self):
361 361 return encoding.tolocal(self._text[self._offsets[3] + 2 :])
362 362
363 363
364 364 class changelog(revlog.revlog):
365 365 def __init__(self, opener, trypending=False):
366 366 """Load a changelog revlog using an opener.
367 367
368 368 If ``trypending`` is true, we attempt to load the index from a
369 369 ``00changelog.i.a`` file instead of the default ``00changelog.i``.
370 370 The ``00changelog.i.a`` file contains index (and possibly inline
371 371 revision) data for a transaction that hasn't been finalized yet.
372 372 It exists in a separate file to facilitate readers (such as
373 373 hook processes) accessing data before a transaction is finalized.
374 374 """
375 375 if trypending and opener.exists(b'00changelog.i.a'):
376 376 indexfile = b'00changelog.i.a'
377 377 else:
378 378 indexfile = b'00changelog.i'
379 379
380 380 datafile = b'00changelog.d'
381 381 revlog.revlog.__init__(
382 382 self,
383 383 opener,
384 384 indexfile,
385 385 datafile=datafile,
386 386 checkambig=True,
387 387 mmaplargeindex=True,
388 388 persistentnodemap=opener.options.get(b'persistent-nodemap', False),
389 389 )
390 390
391 391 if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
392 392 # changelogs don't benefit from generaldelta.
393 393
394 394 self.version &= ~revlog.FLAG_GENERALDELTA
395 395 self._generaldelta = False
396 396
397 397 # Delta chains for changelogs tend to be very small because entries
398 398 # tend to be small and don't delta well with each other. So disable delta
399 399 # chains.
400 400 self._storedeltachains = False
401 401
402 402 self._realopener = opener
403 403 self._delayed = False
404 404 self._delaybuf = None
405 405 self._divert = False
406 self.filteredrevs = frozenset()
406 self._filteredrevs = frozenset()
407 self._filteredrevs_hashcache = {}
407 408 self._copiesstorage = opener.options.get(b'copies-storage')
408 409
410 @property
411 def filteredrevs(self):
412 return self._filteredrevs
413
414 @filteredrevs.setter
415 def filteredrevs(self, val):
416 # Ensure all updates go through this function
417 assert isinstance(val, frozenset)
418 self._filteredrevs = val
419 self._filteredrevs_hashcache = {}
420
409 421 def delayupdate(self, tr):
410 422 """delay visibility of index updates to other readers"""
411 423
412 424 if not self._delayed:
413 425 if len(self) == 0:
414 426 self._divert = True
415 427 if self._realopener.exists(self.indexfile + b'.a'):
416 428 self._realopener.unlink(self.indexfile + b'.a')
417 429 self.opener = _divertopener(self._realopener, self.indexfile)
418 430 else:
419 431 self._delaybuf = []
420 432 self.opener = _delayopener(
421 433 self._realopener, self.indexfile, self._delaybuf
422 434 )
423 435 self._delayed = True
424 436 tr.addpending(b'cl-%i' % id(self), self._writepending)
425 437 tr.addfinalize(b'cl-%i' % id(self), self._finalize)
426 438
427 439 def _finalize(self, tr):
428 440 """finalize index updates"""
429 441 self._delayed = False
430 442 self.opener = self._realopener
431 443 # move redirected index data back into place
432 444 if self._divert:
433 445 assert not self._delaybuf
434 446 tmpname = self.indexfile + b".a"
435 447 nfile = self.opener.open(tmpname)
436 448 nfile.close()
437 449 self.opener.rename(tmpname, self.indexfile, checkambig=True)
438 450 elif self._delaybuf:
439 451 fp = self.opener(self.indexfile, b'a', checkambig=True)
440 452 fp.write(b"".join(self._delaybuf))
441 453 fp.close()
442 454 self._delaybuf = None
443 455 self._divert = False
444 456 # split when we're done
445 457 self._enforceinlinesize(tr)
446 458
447 459 def _writepending(self, tr):
448 460 """create a file containing the unfinalized state for
449 461 pretxnchangegroup"""
450 462 if self._delaybuf:
451 463 # make a temporary copy of the index
452 464 fp1 = self._realopener(self.indexfile)
453 465 pendingfilename = self.indexfile + b".a"
454 466 # register as a temp file to ensure cleanup on failure
455 467 tr.registertmp(pendingfilename)
456 468 # write existing data
457 469 fp2 = self._realopener(pendingfilename, b"w")
458 470 fp2.write(fp1.read())
459 471 # add pending data
460 472 fp2.write(b"".join(self._delaybuf))
461 473 fp2.close()
462 474 # switch modes so finalize can simply rename
463 475 self._delaybuf = None
464 476 self._divert = True
465 477 self.opener = _divertopener(self._realopener, self.indexfile)
466 478
467 479 if self._divert:
468 480 return True
469 481
470 482 return False
471 483
472 484 def _enforceinlinesize(self, tr, fp=None):
473 485 if not self._delayed:
474 486 revlog.revlog._enforceinlinesize(self, tr, fp)
475 487
476 488 def read(self, node):
477 489 """Obtain data from a parsed changelog revision.
478 490
479 491 Returns a 6-tuple of:
480 492
481 493 - manifest node in binary
482 494 - author/user as a localstr
483 495 - date as a 2-tuple of (time, timezone)
484 496 - list of files
485 497 - commit message as a localstr
486 498 - dict of extra metadata
487 499
488 500 Unless you need to access all fields, consider calling
489 501 ``changelogrevision`` instead, as it is faster for partial object
490 502 access.
491 503 """
492 504 d, s = self._revisiondata(node)
493 505 c = changelogrevision(
494 506 d, s, self._copiesstorage == b'changeset-sidedata'
495 507 )
496 508 return (c.manifest, c.user, c.date, c.files, c.description, c.extra)
497 509
498 510 def changelogrevision(self, nodeorrev):
499 511 """Obtain a ``changelogrevision`` for a node or revision."""
500 512 text, sidedata = self._revisiondata(nodeorrev)
501 513 return changelogrevision(
502 514 text, sidedata, self._copiesstorage == b'changeset-sidedata'
503 515 )
504 516
505 517 def readfiles(self, node):
506 518 """
507 519 short version of read that only returns the files modified by the cset
508 520 """
509 521 text = self.revision(node)
510 522 if not text:
511 523 return []
512 524 last = text.index(b"\n\n")
513 525 l = text[:last].split(b'\n')
514 526 return l[3:]
515 527
516 528 def add(
517 529 self,
518 530 manifest,
519 531 files,
520 532 desc,
521 533 transaction,
522 534 p1,
523 535 p2,
524 536 user,
525 537 date=None,
526 538 extra=None,
527 539 ):
528 540 # Convert to UTF-8 encoded bytestrings as the very first
529 541 # thing: calling any method on a localstr object will turn it
530 542 # into a str object and the cached UTF-8 string is thus lost.
531 543 user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
532 544
533 545 user = user.strip()
534 546 # An empty username or a username with a "\n" will make the
535 547 # revision text contain two "\n\n" sequences -> corrupt
536 548 # repository since read cannot unpack the revision.
537 549 if not user:
538 550 raise error.StorageError(_(b"empty username"))
539 551 if b"\n" in user:
540 552 raise error.StorageError(
541 553 _(b"username %r contains a newline") % pycompat.bytestr(user)
542 554 )
543 555
544 556 desc = stripdesc(desc)
545 557
546 558 if date:
547 559 parseddate = b"%d %d" % dateutil.parsedate(date)
548 560 else:
549 561 parseddate = b"%d %d" % dateutil.makedate()
550 562 if extra:
551 563 branch = extra.get(b"branch")
552 564 if branch in (b"default", b""):
553 565 del extra[b"branch"]
554 566 elif branch in (b".", b"null", b"tip"):
555 567 raise error.StorageError(
556 568 _(b'the name \'%s\' is reserved') % branch
557 569 )
558 570 sortedfiles = sorted(files.touched)
559 571 sidedata = None
560 572 if self._copiesstorage == b'changeset-sidedata':
561 573 sidedata = metadata.encode_copies_sidedata(files)
562 574
563 575 if extra:
564 576 extra = encodeextra(extra)
565 577 parseddate = b"%s %s" % (parseddate, extra)
566 578 l = [hex(manifest), user, parseddate] + sortedfiles + [b"", desc]
567 579 text = b"\n".join(l)
568 580 return self.addrevision(
569 581 text, transaction, len(self), p1, p2, sidedata=sidedata
570 582 )
571 583
572 584 def branchinfo(self, rev):
573 585 """return the branch name and open/close state of a revision
574 586
575 587 This function exists because creating a changectx object
576 588 just to access this is costly."""
577 589 extra = self.read(rev)[5]
578 590 return encoding.tolocal(extra.get(b"branch")), b'close' in extra
579 591
580 592 def _nodeduplicatecallback(self, transaction, node):
581 593 # keep track of revisions that got "re-added", e.g. unbundle of a known rev.
582 594 #
583 595 # We track them in a list to preserve their order from the source bundle
584 596 duplicates = transaction.changes.setdefault(b'revduplicates', [])
585 597 duplicates.append(self.rev(node))
@@ -1,2254 +1,2256 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirrev,
27 27 )
28 28 from .pycompat import getattr
29 29 from .thirdparty import attr
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 41 requirements as requirementsmod,
42 42 revsetlang,
43 43 similar,
44 44 smartset,
45 45 url,
46 46 util,
47 47 vfs,
48 48 )
49 49
50 50 from .utils import (
51 51 hashutil,
52 52 procutil,
53 53 stringutil,
54 54 )
55 55
56 56 if pycompat.iswindows:
57 57 from . import scmwindows as scmplatform
58 58 else:
59 59 from . import scmposix as scmplatform
60 60
61 61 parsers = policy.importmod('parsers')
62 62 rustrevlog = policy.importrust('revlog')
63 63
64 64 termsize = scmplatform.termsize
65 65
66 66
67 67 @attr.s(slots=True, repr=False)
68 68 class status(object):
69 69 '''Struct with a list of files per status.
70 70
71 71 The 'deleted', 'unknown' and 'ignored' properties are only
72 72 relevant to the working copy.
73 73 '''
74 74
75 75 modified = attr.ib(default=attr.Factory(list))
76 76 added = attr.ib(default=attr.Factory(list))
77 77 removed = attr.ib(default=attr.Factory(list))
78 78 deleted = attr.ib(default=attr.Factory(list))
79 79 unknown = attr.ib(default=attr.Factory(list))
80 80 ignored = attr.ib(default=attr.Factory(list))
81 81 clean = attr.ib(default=attr.Factory(list))
82 82
83 83 def __iter__(self):
84 84 yield self.modified
85 85 yield self.added
86 86 yield self.removed
87 87 yield self.deleted
88 88 yield self.unknown
89 89 yield self.ignored
90 90 yield self.clean
91 91
92 92 def __repr__(self):
93 93 return (
94 94 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
95 95 r'unknown=%s, ignored=%s, clean=%s>'
96 96 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
97 97
98 98
99 99 def itersubrepos(ctx1, ctx2):
100 100 """find subrepos in ctx1 or ctx2"""
101 101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106 106
107 107 missing = set()
108 108
109 109 for subpath in ctx2.substate:
110 110 if subpath not in ctx1.substate:
111 111 del subpaths[subpath]
112 112 missing.add(subpath)
113 113
114 114 for subpath, ctx in sorted(pycompat.iteritems(subpaths)):
115 115 yield subpath, ctx.sub(subpath)
116 116
117 117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 118 # status and diff will have an accurate result when it does
119 119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 120 # against itself.
121 121 for subpath in missing:
122 122 yield subpath, ctx2.nullsub(subpath, ctx1)
123 123
124 124
125 125 def nochangesfound(ui, repo, excluded=None):
126 126 '''Report no changes for push/pull, excluded is None or a list of
127 127 nodes excluded from the push/pull.
128 128 '''
129 129 secretlist = []
130 130 if excluded:
131 131 for n in excluded:
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(
138 138 _(b"no changes found (ignored %d secret changesets)\n")
139 139 % len(secretlist)
140 140 )
141 141 else:
142 142 ui.status(_(b"no changes found\n"))
143 143
144 144
145 145 def callcatch(ui, func):
146 146 """call func() with global exception handling
147 147
148 148 return func() if no exception happens. otherwise do some error handling
149 149 and return an exit code accordingly. does not handle all exceptions.
150 150 """
151 151 try:
152 152 try:
153 153 return func()
154 154 except: # re-raises
155 155 ui.traceback()
156 156 raise
157 157 # Global exception handling, alphabetically
158 158 # Mercurial-specific first, followed by built-in and library exceptions
159 159 except error.LockHeld as inst:
160 160 if inst.errno == errno.ETIMEDOUT:
161 161 reason = _(b'timed out waiting for lock held by %r') % (
162 162 pycompat.bytestr(inst.locker)
163 163 )
164 164 else:
165 165 reason = _(b'lock held by %r') % inst.locker
166 166 ui.error(
167 167 _(b"abort: %s: %s\n")
168 168 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
169 169 )
170 170 if not inst.locker:
171 171 ui.error(_(b"(lock might be very busy)\n"))
172 172 except error.LockUnavailable as inst:
173 173 ui.error(
174 174 _(b"abort: could not lock %s: %s\n")
175 175 % (
176 176 inst.desc or stringutil.forcebytestr(inst.filename),
177 177 encoding.strtolocal(inst.strerror),
178 178 )
179 179 )
180 180 except error.OutOfBandError as inst:
181 181 if inst.args:
182 182 msg = _(b"abort: remote error:\n")
183 183 else:
184 184 msg = _(b"abort: remote error\n")
185 185 ui.error(msg)
186 186 if inst.args:
187 187 ui.error(b''.join(inst.args))
188 188 if inst.hint:
189 189 ui.error(b'(%s)\n' % inst.hint)
190 190 except error.RepoError as inst:
191 191 ui.error(_(b"abort: %s!\n") % inst)
192 192 if inst.hint:
193 193 ui.error(_(b"(%s)\n") % inst.hint)
194 194 except error.ResponseError as inst:
195 195 ui.error(_(b"abort: %s") % inst.args[0])
196 196 msg = inst.args[1]
197 197 if isinstance(msg, type(u'')):
198 198 msg = pycompat.sysbytes(msg)
199 199 if not isinstance(msg, bytes):
200 200 ui.error(b" %r\n" % (msg,))
201 201 elif not msg:
202 202 ui.error(_(b" empty string\n"))
203 203 else:
204 204 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
205 205 except error.CensoredNodeError as inst:
206 206 ui.error(_(b"abort: file censored %s!\n") % inst)
207 207 except error.StorageError as inst:
208 208 ui.error(_(b"abort: %s!\n") % inst)
209 209 if inst.hint:
210 210 ui.error(_(b"(%s)\n") % inst.hint)
211 211 except error.InterventionRequired as inst:
212 212 ui.error(b"%s\n" % inst)
213 213 if inst.hint:
214 214 ui.error(_(b"(%s)\n") % inst.hint)
215 215 return 1
216 216 except error.WdirUnsupported:
217 217 ui.error(_(b"abort: working directory revision cannot be specified\n"))
218 218 except error.Abort as inst:
219 219 ui.error(_(b"abort: %s\n") % inst)
220 220 if inst.hint:
221 221 ui.error(_(b"(%s)\n") % inst.hint)
222 222 except ImportError as inst:
223 223 ui.error(_(b"abort: %s!\n") % stringutil.forcebytestr(inst))
224 224 m = stringutil.forcebytestr(inst).split()[-1]
225 225 if m in b"mpatch bdiff".split():
226 226 ui.error(_(b"(did you forget to compile extensions?)\n"))
227 227 elif m in b"zlib".split():
228 228 ui.error(_(b"(is your Python install correct?)\n"))
229 229 except (IOError, OSError) as inst:
230 230 if util.safehasattr(inst, b"code"): # HTTPError
231 231 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
232 232 elif util.safehasattr(inst, b"reason"): # URLError or SSLError
233 233 try: # usually it is in the form (errno, strerror)
234 234 reason = inst.reason.args[1]
235 235 except (AttributeError, IndexError):
236 236 # it might be anything, for example a string
237 237 reason = inst.reason
238 238 if isinstance(reason, pycompat.unicode):
239 239 # SSLError of Python 2.7.9 contains a unicode
240 240 reason = encoding.unitolocal(reason)
241 241 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
242 242 elif (
243 243 util.safehasattr(inst, b"args")
244 244 and inst.args
245 245 and inst.args[0] == errno.EPIPE
246 246 ):
247 247 pass
248 248 elif getattr(inst, "strerror", None): # common IOError or OSError
249 249 if getattr(inst, "filename", None) is not None:
250 250 ui.error(
251 251 _(b"abort: %s: '%s'\n")
252 252 % (
253 253 encoding.strtolocal(inst.strerror),
254 254 stringutil.forcebytestr(inst.filename),
255 255 )
256 256 )
257 257 else:
258 258 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
259 259 else: # suspicious IOError
260 260 raise
261 261 except MemoryError:
262 262 ui.error(_(b"abort: out of memory\n"))
263 263 except SystemExit as inst:
264 264 # Commands shouldn't sys.exit directly, but give a return code.
265 265 # Just in case, catch this and pass the exit code to the caller.
266 266 return inst.code
267 267
268 268 return -1
269 269
270 270
271 271 def checknewlabel(repo, lbl, kind):
272 272 # Do not use the "kind" parameter in ui output.
273 273 # It makes strings difficult to translate.
274 274 if lbl in [b'tip', b'.', b'null']:
275 275 raise error.Abort(_(b"the name '%s' is reserved") % lbl)
276 276 for c in (b':', b'\0', b'\n', b'\r'):
277 277 if c in lbl:
278 278 raise error.Abort(
279 279 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
280 280 )
281 281 try:
282 282 int(lbl)
283 283 raise error.Abort(_(b"cannot use an integer as a name"))
284 284 except ValueError:
285 285 pass
286 286 if lbl.strip() != lbl:
287 287 raise error.Abort(_(b"leading or trailing whitespace in name %r") % lbl)
288 288
289 289
290 290 def checkfilename(f):
291 291 '''Check that the filename f is an acceptable filename for a tracked file'''
292 292 if b'\r' in f or b'\n' in f:
293 293 raise error.Abort(
294 294 _(b"'\\n' and '\\r' disallowed in filenames: %r")
295 295 % pycompat.bytestr(f)
296 296 )
297 297
298 298
299 299 def checkportable(ui, f):
300 300 '''Check if filename f is portable and warn or abort depending on config'''
301 301 checkfilename(f)
302 302 abort, warn = checkportabilityalert(ui)
303 303 if abort or warn:
304 304 msg = util.checkwinfilename(f)
305 305 if msg:
306 306 msg = b"%s: %s" % (msg, procutil.shellquote(f))
307 307 if abort:
308 308 raise error.Abort(msg)
309 309 ui.warn(_(b"warning: %s\n") % msg)
310 310
311 311
312 312 def checkportabilityalert(ui):
313 313 '''check if the user's config requests nothing, a warning, or abort for
314 314 non-portable filenames'''
315 315 val = ui.config(b'ui', b'portablefilenames')
316 316 lval = val.lower()
317 317 bval = stringutil.parsebool(val)
318 318 abort = pycompat.iswindows or lval == b'abort'
319 319 warn = bval or lval == b'warn'
320 320 if bval is None and not (warn or abort or lval == b'ignore'):
321 321 raise error.ConfigError(
322 322 _(b"ui.portablefilenames value is invalid ('%s')") % val
323 323 )
324 324 return abort, warn
325 325
326 326
327 327 class casecollisionauditor(object):
328 328 def __init__(self, ui, abort, dirstate):
329 329 self._ui = ui
330 330 self._abort = abort
331 331 allfiles = b'\0'.join(dirstate)
332 332 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
333 333 self._dirstate = dirstate
334 334 # The purpose of _newfiles is so that we don't complain about
335 335 # case collisions if someone were to call this object with the
336 336 # same filename twice.
337 337 self._newfiles = set()
338 338
339 339 def __call__(self, f):
340 340 if f in self._newfiles:
341 341 return
342 342 fl = encoding.lower(f)
343 343 if fl in self._loweredfiles and f not in self._dirstate:
344 344 msg = _(b'possible case-folding collision for %s') % f
345 345 if self._abort:
346 346 raise error.Abort(msg)
347 347 self._ui.warn(_(b"warning: %s\n") % msg)
348 348 self._loweredfiles.add(fl)
349 349 self._newfiles.add(f)
350 350
351 351
352 352 def filteredhash(repo, maxrev):
353 353 """build hash of filtered revisions in the current repoview.
354 354
355 355 Multiple caches perform up-to-date validation by checking that the
356 356 tiprev and tipnode stored in the cache file match the current repository.
357 357 However, this is not sufficient for validating repoviews because the set
358 358 of revisions in the view may change without the repository tiprev and
359 359 tipnode changing.
360 360
361 361 This function hashes all the revs filtered from the view and returns
362 362 that SHA-1 digest.
363 363 """
364 364 cl = repo.changelog
365 365 if not cl.filteredrevs:
366 366 return None
367 key = None
368 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
369 if revs:
370 s = hashutil.sha1()
371 for rev in revs:
372 s.update(b'%d;' % rev)
373 key = s.digest()
367 key = cl._filteredrevs_hashcache.get(maxrev)
368 if not key:
369 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
370 if revs:
371 s = hashutil.sha1()
372 for rev in revs:
373 s.update(b'%d;' % rev)
374 key = s.digest()
375 cl._filteredrevs_hashcache[maxrev] = key
374 376 return key
375 377
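For context on how callers use this digest: a typical on-disk cache stores `(tiprev, tipnode, filteredhash)` and discards itself when any of the three no longer matches. A hedged sketch of such a consumer (`cacheisvalid` is a hypothetical helper, assuming a loaded `repo` and the `filteredhash()` defined above; the branchmap cache named in the commit message is the real motivating caller):

def cacheisvalid(repo, cachedtiprev, cachedtipnode, cachedhash):
    cl = repo.changelog
    if cachedtiprev >= len(cl):
        return False  # repository was stripped below the cached tip
    if cl.node(cachedtiprev) != cachedtipnode:
        return False  # history at the cached tip was rewritten
    # tiprev/tipnode alone cannot detect a changed repoview, so also
    # compare the digest of the revisions filtered out of the view
    return cachedhash == filteredhash(repo, cachedtiprev)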
376 378
377 379 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
378 380 '''yield every hg repository under path, always recursively.
379 381 The recurse flag will only control recursion into repo working dirs'''
380 382
381 383 def errhandler(err):
382 384 if err.filename == path:
383 385 raise err
384 386
385 387 samestat = getattr(os.path, 'samestat', None)
386 388 if followsym and samestat is not None:
387 389
388 390 def adddir(dirlst, dirname):
389 391 dirstat = os.stat(dirname)
390 392 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
391 393 if not match:
392 394 dirlst.append(dirstat)
393 395 return not match
394 396
395 397 else:
396 398 followsym = False
397 399
398 400 if (seen_dirs is None) and followsym:
399 401 seen_dirs = []
400 402 adddir(seen_dirs, path)
401 403 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
402 404 dirs.sort()
403 405 if b'.hg' in dirs:
404 406 yield root # found a repository
405 407 qroot = os.path.join(root, b'.hg', b'patches')
406 408 if os.path.isdir(os.path.join(qroot, b'.hg')):
407 409 yield qroot # we have a patch queue repo here
408 410 if recurse:
409 411 # avoid recursing inside the .hg directory
410 412 dirs.remove(b'.hg')
411 413 else:
412 414 dirs[:] = [] # don't descend further
413 415 elif followsym:
414 416 newdirs = []
415 417 for d in dirs:
416 418 fname = os.path.join(root, d)
417 419 if adddir(seen_dirs, fname):
418 420 if os.path.islink(fname):
419 421 for hgname in walkrepos(fname, True, seen_dirs):
420 422 yield hgname
421 423 else:
422 424 newdirs.append(d)
423 425 dirs[:] = newdirs
424 426
425 427
426 428 def binnode(ctx):
427 429 """Return binary node id for a given basectx"""
428 430 node = ctx.node()
429 431 if node is None:
430 432 return wdirid
431 433 return node
432 434
433 435
434 436 def intrev(ctx):
435 437 """Return integer for a given basectx that can be used in comparison or
436 438 arithmetic operation"""
437 439 rev = ctx.rev()
438 440 if rev is None:
439 441 return wdirrev
440 442 return rev
441 443
442 444
443 445 def formatchangeid(ctx):
444 446 """Format changectx as '{rev}:{node|formatnode}', which is the default
445 447 template provided by logcmdutil.changesettemplater"""
446 448 repo = ctx.repo()
447 449 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
448 450
449 451
450 452 def formatrevnode(ui, rev, node):
451 453 """Format given revision and node depending on the current verbosity"""
452 454 if ui.debugflag:
453 455 hexfunc = hex
454 456 else:
455 457 hexfunc = short
456 458 return b'%d:%s' % (rev, hexfunc(node))
457 459
458 460
459 461 def resolvehexnodeidprefix(repo, prefix):
460 462 if prefix.startswith(b'x'):
461 463 prefix = prefix[1:]
462 464 try:
463 465 # Uses unfiltered repo because it's faster when prefix is ambiguous.
464 466 # This matches the shortesthexnodeidprefix() function below.
465 467 node = repo.unfiltered().changelog._partialmatch(prefix)
466 468 except error.AmbiguousPrefixLookupError:
467 469 revset = repo.ui.config(
468 470 b'experimental', b'revisions.disambiguatewithin'
469 471 )
470 472 if revset:
471 473 # Clear config to avoid infinite recursion
472 474 configoverrides = {
473 475 (b'experimental', b'revisions.disambiguatewithin'): None
474 476 }
475 477 with repo.ui.configoverride(configoverrides):
476 478 revs = repo.anyrevs([revset], user=True)
477 479 matches = []
478 480 for rev in revs:
479 481 node = repo.changelog.node(rev)
480 482 if hex(node).startswith(prefix):
481 483 matches.append(node)
482 484 if len(matches) == 1:
483 485 return matches[0]
484 486 raise
485 487 if node is None:
486 488 return
487 489 repo.changelog.rev(node) # make sure node isn't filtered
488 490 return node
489 491
490 492
491 493 def mayberevnum(repo, prefix):
492 494 """Checks if the given prefix may be mistaken for a revision number"""
493 495 try:
494 496 i = int(prefix)
495 497 # if we are a pure int, then starting with zero will not be
496 498 # confused as a rev; or, obviously, if the int is larger
497 499 # than the value of the tip rev. We still need to disambiguate if
498 500 # prefix == '0', since that *is* a valid revnum.
499 501 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
500 502 return False
501 503 return True
502 504 except ValueError:
503 505 return False
504 506
505 507
506 508 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
507 509 """Find the shortest unambiguous prefix that matches hexnode.
508 510
509 511 If "cache" is not None, it must be a dictionary that can be used for
510 512 caching between calls to this method.
511 513 """
512 514 # _partialmatch() of filtered changelog could take O(len(repo)) time,
513 515 # which would be unacceptably slow. so we look for hash collision in
514 516 # unfiltered space, which means some hashes may be slightly longer.
515 517
516 518 minlength = max(minlength, 1)
517 519
518 520 def disambiguate(prefix):
519 521 """Disambiguate against revnums."""
520 522 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
521 523 if mayberevnum(repo, prefix):
522 524 return b'x' + prefix
523 525 else:
524 526 return prefix
525 527
526 528 hexnode = hex(node)
527 529 for length in range(len(prefix), len(hexnode) + 1):
528 530 prefix = hexnode[:length]
529 531 if not mayberevnum(repo, prefix):
530 532 return prefix
531 533
532 534 cl = repo.unfiltered().changelog
533 535 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
534 536 if revset:
535 537 revs = None
536 538 if cache is not None:
537 539 revs = cache.get(b'disambiguationrevset')
538 540 if revs is None:
539 541 revs = repo.anyrevs([revset], user=True)
540 542 if cache is not None:
541 543 cache[b'disambiguationrevset'] = revs
542 544 if cl.rev(node) in revs:
543 545 hexnode = hex(node)
544 546 nodetree = None
545 547 if cache is not None:
546 548 nodetree = cache.get(b'disambiguationnodetree')
547 549 if not nodetree:
548 550 if util.safehasattr(parsers, 'nodetree'):
549 551 # The CExt is the only implementation to provide a nodetree
550 552 # class so far.
551 553 index = cl.index
552 554 if util.safehasattr(index, 'get_cindex'):
553 555 # the rust wrapper needs to give access to its internal index
554 556 index = index.get_cindex()
555 557 nodetree = parsers.nodetree(index, len(revs))
556 558 for r in revs:
557 559 nodetree.insert(r)
558 560 if cache is not None:
559 561 cache[b'disambiguationnodetree'] = nodetree
560 562 if nodetree is not None:
561 563 length = max(nodetree.shortest(node), minlength)
562 564 prefix = hexnode[:length]
563 565 return disambiguate(prefix)
564 566 for length in range(minlength, len(hexnode) + 1):
565 567 matches = []
566 568 prefix = hexnode[:length]
567 569 for rev in revs:
568 570 otherhexnode = repo[rev].hex()
569 571 if prefix == otherhexnode[:length]:
570 572 matches.append(otherhexnode)
571 573 if len(matches) == 1:
572 574 return disambiguate(prefix)
573 575
574 576 try:
575 577 return disambiguate(cl.shortest(node, minlength))
576 578 except error.LookupError:
577 579 raise error.RepoLookupError()
578 580
579 581
580 582 def isrevsymbol(repo, symbol):
581 583 """Checks if a symbol exists in the repo.
582 584
583 585 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
584 586 symbol is an ambiguous nodeid prefix.
585 587 """
586 588 try:
587 589 revsymbol(repo, symbol)
588 590 return True
589 591 except error.RepoLookupError:
590 592 return False
591 593
592 594
593 595 def revsymbol(repo, symbol):
594 596 """Returns a context given a single revision symbol (as string).
595 597
596 598 This is similar to revsingle(), but accepts only a single revision symbol,
597 599 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
598 600 not "max(public())".
599 601 """
600 602 if not isinstance(symbol, bytes):
601 603 msg = (
602 604 b"symbol (%s of type %s) was not a string, did you mean "
603 605 b"repo[symbol]?" % (symbol, type(symbol))
604 606 )
605 607 raise error.ProgrammingError(msg)
606 608 try:
607 609 if symbol in (b'.', b'tip', b'null'):
608 610 return repo[symbol]
609 611
610 612 try:
611 613 r = int(symbol)
612 614 if b'%d' % r != symbol:
613 615 raise ValueError
614 616 l = len(repo.changelog)
615 617 if r < 0:
616 618 r += l
617 619 if r < 0 or r >= l and r != wdirrev:
618 620 raise ValueError
619 621 return repo[r]
620 622 except error.FilteredIndexError:
621 623 raise
622 624 except (ValueError, OverflowError, IndexError):
623 625 pass
624 626
625 627 if len(symbol) == 40:
626 628 try:
627 629 node = bin(symbol)
628 630 rev = repo.changelog.rev(node)
629 631 return repo[rev]
630 632 except error.FilteredLookupError:
631 633 raise
632 634 except (TypeError, LookupError):
633 635 pass
634 636
635 637 # look up bookmarks through the name interface
636 638 try:
637 639 node = repo.names.singlenode(repo, symbol)
638 640 rev = repo.changelog.rev(node)
639 641 return repo[rev]
640 642 except KeyError:
641 643 pass
642 644
643 645 node = resolvehexnodeidprefix(repo, symbol)
644 646 if node is not None:
645 647 rev = repo.changelog.rev(node)
646 648 return repo[rev]
647 649
648 650 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
649 651
650 652 except error.WdirUnsupported:
651 653 return repo[None]
652 654 except (
653 655 error.FilteredIndexError,
654 656 error.FilteredLookupError,
655 657 error.FilteredRepoLookupError,
656 658 ):
657 659 raise _filterederror(repo, symbol)
658 660
659 661
660 662 def _filterederror(repo, changeid):
661 663 """build an exception to be raised about a filtered changeid
662 664
663 665 This is extracted in a function to help extensions (eg: evolve) to
664 666 experiment with various message variants."""
665 667 if repo.filtername.startswith(b'visible'):
666 668
667 669 # Check if the changeset is obsolete
668 670 unfilteredrepo = repo.unfiltered()
669 671 ctx = revsymbol(unfilteredrepo, changeid)
670 672
671 673 # If the changeset is obsolete, enrich the message with the reason
672 674 # that made this changeset not visible
673 675 if ctx.obsolete():
674 676 msg = obsutil._getfilteredreason(repo, changeid, ctx)
675 677 else:
676 678 msg = _(b"hidden revision '%s'") % changeid
677 679
678 680 hint = _(b'use --hidden to access hidden revisions')
679 681
680 682 return error.FilteredRepoLookupError(msg, hint=hint)
681 683 msg = _(b"filtered revision '%s' (not in '%s' subset)")
682 684 msg %= (changeid, repo.filtername)
683 685 return error.FilteredRepoLookupError(msg)
684 686
685 687
686 688 def revsingle(repo, revspec, default=b'.', localalias=None):
687 689 if not revspec and revspec != 0:
688 690 return repo[default]
689 691
690 692 l = revrange(repo, [revspec], localalias=localalias)
691 693 if not l:
692 694 raise error.Abort(_(b'empty revision set'))
693 695 return repo[l.last()]
694 696
695 697
696 698 def _pairspec(revspec):
697 699 tree = revsetlang.parse(revspec)
698 700 return tree and tree[0] in (
699 701 b'range',
700 702 b'rangepre',
701 703 b'rangepost',
702 704 b'rangeall',
703 705 )
704 706
705 707
706 708 def revpair(repo, revs):
707 709 if not revs:
708 710 return repo[b'.'], repo[None]
709 711
710 712 l = revrange(repo, revs)
711 713
712 714 if not l:
713 715 raise error.Abort(_(b'empty revision range'))
714 716
715 717 first = l.first()
716 718 second = l.last()
717 719
718 720 if (
719 721 first == second
720 722 and len(revs) >= 2
721 723 and not all(revrange(repo, [r]) for r in revs)
722 724 ):
723 725 raise error.Abort(_(b'empty revision on one side of range'))
724 726
725 727 # if top-level is range expression, the result must always be a pair
726 728 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
727 729 return repo[first], repo[None]
728 730
729 731 return repo[first], repo[second]
730 732
731 733
732 734 def revrange(repo, specs, localalias=None):
733 735 """Execute 1 to many revsets and return the union.
734 736
735 737 This is the preferred mechanism for executing revsets using user-specified
736 738 config options, such as revset aliases.
737 739
738 740 The revsets specified by ``specs`` will be executed via a chained ``OR``
739 741 expression. If ``specs`` is empty, an empty result is returned.
740 742
741 743 ``specs`` can contain integers, in which case they are assumed to be
742 744 revision numbers.
743 745
744 746 It is assumed the revsets are already formatted. If you have arguments
745 747 that need to be expanded in the revset, call ``revsetlang.formatspec()``
746 748 and pass the result as an element of ``specs``.
747 749
748 750 Specifying a single revset is allowed.
749 751
750 752 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
751 753 integer revisions.
752 754 """
753 755 allspecs = []
754 756 for spec in specs:
755 757 if isinstance(spec, int):
756 758 spec = revsetlang.formatspec(b'%d', spec)
757 759 allspecs.append(spec)
758 760 return repo.anyrevs(allspecs, user=True, localalias=localalias)
759 761
760 762
761 763 def meaningfulparents(repo, ctx):
762 764 """Return list of meaningful (or all if debug) parentrevs for rev.
763 765
764 766 For merges (two non-nullrev revisions) both parents are meaningful.
765 767 Otherwise the first parent revision is considered meaningful if it
766 768 is not the preceding revision.
767 769 """
768 770 parents = ctx.parents()
769 771 if len(parents) > 1:
770 772 return parents
771 773 if repo.ui.debugflag:
772 774 return [parents[0], repo[nullrev]]
773 775 if parents[0].rev() >= intrev(ctx) - 1:
774 776 return []
775 777 return parents
776 778
777 779
778 780 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
779 781 """Return a function that produced paths for presenting to the user.
780 782
781 783 The returned function takes a repo-relative path and produces a path
782 784 that can be presented in the UI.
783 785
784 786 Depending on the value of ui.relative-paths, either a repo-relative or
785 787 cwd-relative path will be produced.
786 788
787 789 legacyrelativevalue is the value to use if ui.relative-paths=legacy
788 790
789 791 If forcerelativevalue is not None, then that value will be used regardless
790 792 of what ui.relative-paths is set to.
791 793 """
792 794 if forcerelativevalue is not None:
793 795 relative = forcerelativevalue
794 796 else:
795 797 config = repo.ui.config(b'ui', b'relative-paths')
796 798 if config == b'legacy':
797 799 relative = legacyrelativevalue
798 800 else:
799 801 relative = stringutil.parsebool(config)
800 802 if relative is None:
801 803 raise error.ConfigError(
802 804 _(b"ui.relative-paths is not a boolean ('%s')") % config
803 805 )
804 806
805 807 if relative:
806 808 cwd = repo.getcwd()
807 809 if cwd != b'':
808 810 # this branch would work even if cwd == b'' (ie cwd = repo
809 811 # root), but its generality makes the returned function slower
810 812 pathto = repo.pathto
811 813 return lambda f: pathto(f, cwd)
812 814 if repo.ui.configbool(b'ui', b'slash'):
813 815 return lambda f: f
814 816 else:
815 817 return util.localpath
816 818
817 819
818 820 def subdiruipathfn(subpath, uipathfn):
819 821 '''Create a new uipathfn that treats the file as relative to subpath.'''
820 822 return lambda f: uipathfn(posixpath.join(subpath, f))
821 823
822 824
823 825 def anypats(pats, opts):
824 826 '''Checks if any patterns, including --include and --exclude were given.
825 827
826 828 Some commands (e.g. addremove) use this condition for deciding whether to
827 829 print absolute or relative paths.
828 830 '''
829 831 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
830 832
831 833
832 834 def expandpats(pats):
833 835 '''Expand bare globs when running on windows.
834 836 On posix we assume it has already been done by sh.'''
835 837 if not util.expandglobs:
836 838 return list(pats)
837 839 ret = []
838 840 for kindpat in pats:
839 841 kind, pat = matchmod._patsplit(kindpat, None)
840 842 if kind is None:
841 843 try:
842 844 globbed = glob.glob(pat)
843 845 except re.error:
844 846 globbed = [pat]
845 847 if globbed:
846 848 ret.extend(globbed)
847 849 continue
848 850 ret.append(kindpat)
849 851 return ret
850 852
851 853
852 854 def matchandpats(
853 855 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
854 856 ):
855 857 '''Return a matcher and the patterns that were used.
856 858 The matcher will warn about bad matches, unless an alternate badfn callback
857 859 is provided.'''
858 860 if opts is None:
859 861 opts = {}
860 862 if not globbed and default == b'relpath':
861 863 pats = expandpats(pats or [])
862 864
863 865 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
864 866
865 867 def bad(f, msg):
866 868 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
867 869
868 870 if badfn is None:
869 871 badfn = bad
870 872
871 873 m = ctx.match(
872 874 pats,
873 875 opts.get(b'include'),
874 876 opts.get(b'exclude'),
875 877 default,
876 878 listsubrepos=opts.get(b'subrepos'),
877 879 badfn=badfn,
878 880 )
879 881
880 882 if m.always():
881 883 pats = []
882 884 return m, pats
883 885
884 886
885 887 def match(
886 888 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
887 889 ):
888 890 '''Return a matcher that will warn about bad matches.'''
889 891 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
890 892
891 893
892 894 def matchall(repo):
893 895 '''Return a matcher that will efficiently match everything.'''
894 896 return matchmod.always()
895 897
896 898
897 899 def matchfiles(repo, files, badfn=None):
898 900 '''Return a matcher that will efficiently match exactly these files.'''
899 901 return matchmod.exact(files, badfn=badfn)
900 902
901 903
902 904 def parsefollowlinespattern(repo, rev, pat, msg):
903 905 """Return a file name from `pat` pattern suitable for usage in followlines
904 906 logic.
905 907 """
906 908 if not matchmod.patkind(pat):
907 909 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
908 910 else:
909 911 ctx = repo[rev]
910 912 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
911 913 files = [f for f in ctx if m(f)]
912 914 if len(files) != 1:
913 915 raise error.ParseError(msg)
914 916 return files[0]
915 917
916 918
917 919 def getorigvfs(ui, repo):
918 920 """return a vfs suitable to save 'orig' file
919 921
920 922 return None if no special directory is configured"""
921 923 origbackuppath = ui.config(b'ui', b'origbackuppath')
922 924 if not origbackuppath:
923 925 return None
924 926 return vfs.vfs(repo.wvfs.join(origbackuppath))
925 927
926 928
927 929 def backuppath(ui, repo, filepath):
928 930 '''customize where working copy backup files (.orig files) are created
929 931
930 932 Fetch user defined path from config file: [ui] origbackuppath = <path>
931 933 Fall back to default (filepath with .orig suffix) if not specified
932 934
933 935 filepath is repo-relative
934 936
935 937 Returns an absolute path
936 938 '''
937 939 origvfs = getorigvfs(ui, repo)
938 940 if origvfs is None:
939 941 return repo.wjoin(filepath + b".orig")
940 942
941 943 origbackupdir = origvfs.dirname(filepath)
942 944 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
943 945 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
944 946
945 947 # Remove any files that conflict with the backup file's path
946 948 for f in reversed(list(pathutil.finddirs(filepath))):
947 949 if origvfs.isfileorlink(f):
948 950 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
949 951 origvfs.unlink(f)
950 952 break
951 953
952 954 origvfs.makedirs(origbackupdir)
953 955
954 956 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
955 957 ui.note(
956 958 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
957 959 )
958 960 origvfs.rmtree(filepath, forcibly=True)
959 961
960 962 return origvfs.join(filepath)
961 963
962 964
963 965 class _containsnode(object):
964 966 """proxy __contains__(node) to container.__contains__ which accepts revs"""
965 967
966 968 def __init__(self, repo, revcontainer):
967 969 self._torev = repo.changelog.rev
968 970 self._revcontains = revcontainer.__contains__
969 971
970 972 def __contains__(self, node):
971 973 return self._revcontains(self._torev(node))
972 974
973 975
974 976 def cleanupnodes(
975 977 repo,
976 978 replacements,
977 979 operation,
978 980 moves=None,
979 981 metadata=None,
980 982 fixphase=False,
981 983 targetphase=None,
982 984 backup=True,
983 985 ):
984 986 """do common cleanups when old nodes are replaced by new nodes
985 987
986 988 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
987 989 (we might also want to move working directory parent in the future)
988 990
989 991 By default, bookmark moves are calculated automatically from 'replacements',
990 992 but 'moves' can be used to override that. Also, 'moves' may include
991 993 additional bookmark moves that should not have associated obsmarkers.
992 994
993 995 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
994 996 have replacements. operation is a string, like "rebase".
995 997
996 998 metadata is a dictionary containing metadata to be stored in obsmarker if
997 999 obsolescence is enabled.
998 1000 """
999 1001 assert fixphase or targetphase is None
1000 1002 if not replacements and not moves:
1001 1003 return
1002 1004
1003 1005 # translate mapping's other forms
1004 1006 if not util.safehasattr(replacements, b'items'):
1005 1007 replacements = {(n,): () for n in replacements}
1006 1008 else:
1007 1009 # upgrading non-tuple "source" to tuple ones for BC
1008 1010 repls = {}
1009 1011 for key, value in replacements.items():
1010 1012 if not isinstance(key, tuple):
1011 1013 key = (key,)
1012 1014 repls[key] = value
1013 1015 replacements = repls
1014 1016
1015 1017 # Unfiltered repo is needed since nodes in replacements might be hidden.
1016 1018 unfi = repo.unfiltered()
1017 1019
1018 1020 # Calculate bookmark movements
1019 1021 if moves is None:
1020 1022 moves = {}
1021 1023 for oldnodes, newnodes in replacements.items():
1022 1024 for oldnode in oldnodes:
1023 1025 if oldnode in moves:
1024 1026 continue
1025 1027 if len(newnodes) > 1:
1026 1028 # usually a split, take the one with biggest rev number
1027 1029 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1028 1030 elif len(newnodes) == 0:
1029 1031 # move bookmark backwards
1030 1032 allreplaced = []
1031 1033 for rep in replacements:
1032 1034 allreplaced.extend(rep)
1033 1035 roots = list(
1034 1036 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1035 1037 )
1036 1038 if roots:
1037 1039 newnode = roots[0].node()
1038 1040 else:
1039 1041 newnode = nullid
1040 1042 else:
1041 1043 newnode = newnodes[0]
1042 1044 moves[oldnode] = newnode
1043 1045
1044 1046 allnewnodes = [n for ns in replacements.values() for n in ns]
1045 1047 toretract = {}
1046 1048 toadvance = {}
1047 1049 if fixphase:
1048 1050 precursors = {}
1049 1051 for oldnodes, newnodes in replacements.items():
1050 1052 for oldnode in oldnodes:
1051 1053 for newnode in newnodes:
1052 1054 precursors.setdefault(newnode, []).append(oldnode)
1053 1055
1054 1056 allnewnodes.sort(key=lambda n: unfi[n].rev())
1055 1057 newphases = {}
1056 1058
1057 1059 def phase(ctx):
1058 1060 return newphases.get(ctx.node(), ctx.phase())
1059 1061
1060 1062 for newnode in allnewnodes:
1061 1063 ctx = unfi[newnode]
1062 1064 parentphase = max(phase(p) for p in ctx.parents())
1063 1065 if targetphase is None:
1064 1066 oldphase = max(
1065 1067 unfi[oldnode].phase() for oldnode in precursors[newnode]
1066 1068 )
1067 1069 newphase = max(oldphase, parentphase)
1068 1070 else:
1069 1071 newphase = max(targetphase, parentphase)
1070 1072 newphases[newnode] = newphase
1071 1073 if newphase > ctx.phase():
1072 1074 toretract.setdefault(newphase, []).append(newnode)
1073 1075 elif newphase < ctx.phase():
1074 1076 toadvance.setdefault(newphase, []).append(newnode)
1075 1077
1076 1078 with repo.transaction(b'cleanup') as tr:
1077 1079 # Move bookmarks
1078 1080 bmarks = repo._bookmarks
1079 1081 bmarkchanges = []
1080 1082 for oldnode, newnode in moves.items():
1081 1083 oldbmarks = repo.nodebookmarks(oldnode)
1082 1084 if not oldbmarks:
1083 1085 continue
1084 1086 from . import bookmarks # avoid import cycle
1085 1087
1086 1088 repo.ui.debug(
1087 1089 b'moving bookmarks %r from %s to %s\n'
1088 1090 % (
1089 1091 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1090 1092 hex(oldnode),
1091 1093 hex(newnode),
1092 1094 )
1093 1095 )
1094 1096 # Delete divergent bookmarks being parents of related newnodes
1095 1097 deleterevs = repo.revs(
1096 1098 b'parents(roots(%ln & (::%n))) - parents(%n)',
1097 1099 allnewnodes,
1098 1100 newnode,
1099 1101 oldnode,
1100 1102 )
1101 1103 deletenodes = _containsnode(repo, deleterevs)
1102 1104 for name in oldbmarks:
1103 1105 bmarkchanges.append((name, newnode))
1104 1106 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1105 1107 bmarkchanges.append((b, None))
1106 1108
1107 1109 if bmarkchanges:
1108 1110 bmarks.applychanges(repo, tr, bmarkchanges)
1109 1111
1110 1112 for phase, nodes in toretract.items():
1111 1113 phases.retractboundary(repo, tr, phase, nodes)
1112 1114 for phase, nodes in toadvance.items():
1113 1115 phases.advanceboundary(repo, tr, phase, nodes)
1114 1116
1115 1117 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1116 1118 # Obsolete or strip nodes
1117 1119 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1118 1120 # If a node is already obsoleted, and we want to obsolete it
1119 1121 # without a successor, skip that obsolete request since it's
1120 1122 # unnecessary. That's the "if s or not isobs(n)" check below.
1121 1123 # Also sort the nodes in topological order, which might be useful
1122 1124 # for some obsstore logic.
1123 1125 # NOTE: the sorting might belong to createmarkers.
1124 1126 torev = unfi.changelog.rev
1125 1127 sortfunc = lambda ns: torev(ns[0][0])
1126 1128 rels = []
1127 1129 for ns, s in sorted(replacements.items(), key=sortfunc):
1128 1130 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1129 1131 rels.append(rel)
1130 1132 if rels:
1131 1133 obsolete.createmarkers(
1132 1134 repo, rels, operation=operation, metadata=metadata
1133 1135 )
1134 1136 elif phases.supportinternal(repo) and mayusearchived:
1135 1137 # this assumes we do not have "unstable" nodes above the cleaned ones
1136 1138 allreplaced = set()
1137 1139 for ns in replacements.keys():
1138 1140 allreplaced.update(ns)
1139 1141 if backup:
1140 1142 from . import repair # avoid import cycle
1141 1143
1142 1144 node = min(allreplaced, key=repo.changelog.rev)
1143 1145 repair.backupbundle(
1144 1146 repo, allreplaced, allreplaced, node, operation
1145 1147 )
1146 1148 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1147 1149 else:
1148 1150 from . import repair # avoid import cycle
1149 1151
1150 1152 tostrip = list(n for ns in replacements for n in ns)
1151 1153 if tostrip:
1152 1154 repair.delayedstrip(
1153 1155 repo.ui, repo, tostrip, operation, backup=backup
1154 1156 )
1155 1157
1156 1158
1157 1159 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1158 1160 if opts is None:
1159 1161 opts = {}
1160 1162 m = matcher
1161 1163 dry_run = opts.get(b'dry_run')
1162 1164 try:
1163 1165 similarity = float(opts.get(b'similarity') or 0)
1164 1166 except ValueError:
1165 1167 raise error.Abort(_(b'similarity must be a number'))
1166 1168 if similarity < 0 or similarity > 100:
1167 1169 raise error.Abort(_(b'similarity must be between 0 and 100'))
1168 1170 similarity /= 100.0
1169 1171
1170 1172 ret = 0
1171 1173
1172 1174 wctx = repo[None]
1173 1175 for subpath in sorted(wctx.substate):
1174 1176 submatch = matchmod.subdirmatcher(subpath, m)
1175 1177 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1176 1178 sub = wctx.sub(subpath)
1177 1179 subprefix = repo.wvfs.reljoin(prefix, subpath)
1178 1180 subuipathfn = subdiruipathfn(subpath, uipathfn)
1179 1181 try:
1180 1182 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1181 1183 ret = 1
1182 1184 except error.LookupError:
1183 1185 repo.ui.status(
1184 1186 _(b"skipping missing subrepository: %s\n")
1185 1187 % uipathfn(subpath)
1186 1188 )
1187 1189
1188 1190 rejected = []
1189 1191
1190 1192 def badfn(f, msg):
1191 1193 if f in m.files():
1192 1194 m.bad(f, msg)
1193 1195 rejected.append(f)
1194 1196
1195 1197 badmatch = matchmod.badmatch(m, badfn)
1196 1198 added, unknown, deleted, removed, forgotten = _interestingfiles(
1197 1199 repo, badmatch
1198 1200 )
1199 1201
1200 1202 unknownset = set(unknown + forgotten)
1201 1203 toprint = unknownset.copy()
1202 1204 toprint.update(deleted)
1203 1205 for abs in sorted(toprint):
1204 1206 if repo.ui.verbose or not m.exact(abs):
1205 1207 if abs in unknownset:
1206 1208 status = _(b'adding %s\n') % uipathfn(abs)
1207 1209 label = b'ui.addremove.added'
1208 1210 else:
1209 1211 status = _(b'removing %s\n') % uipathfn(abs)
1210 1212 label = b'ui.addremove.removed'
1211 1213 repo.ui.status(status, label=label)
1212 1214
1213 1215 renames = _findrenames(
1214 1216 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1215 1217 )
1216 1218
1217 1219 if not dry_run:
1218 1220 _markchanges(repo, unknown + forgotten, deleted, renames)
1219 1221
1220 1222 for f in rejected:
1221 1223 if f in m.files():
1222 1224 return 1
1223 1225 return ret
1224 1226
1225 1227
1226 1228 def marktouched(repo, files, similarity=0.0):
1227 1229 '''Assert that files have somehow been operated upon. files are relative to
1228 1230 the repo root.'''
1229 1231 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1230 1232 rejected = []
1231 1233
1232 1234 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1233 1235
1234 1236 if repo.ui.verbose:
1235 1237 unknownset = set(unknown + forgotten)
1236 1238 toprint = unknownset.copy()
1237 1239 toprint.update(deleted)
1238 1240 for abs in sorted(toprint):
1239 1241 if abs in unknownset:
1240 1242 status = _(b'adding %s\n') % abs
1241 1243 else:
1242 1244 status = _(b'removing %s\n') % abs
1243 1245 repo.ui.status(status)
1244 1246
1245 1247 # TODO: We should probably have the caller pass in uipathfn and apply it to
1246 1248 # the messages above too. legacyrelativevalue=True is consistent with how
1247 1249 # it used to work.
1248 1250 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1249 1251 renames = _findrenames(
1250 1252 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1251 1253 )
1252 1254
1253 1255 _markchanges(repo, unknown + forgotten, deleted, renames)
1254 1256
1255 1257 for f in rejected:
1256 1258 if f in m.files():
1257 1259 return 1
1258 1260 return 0
1259 1261
1260 1262
1261 1263 def _interestingfiles(repo, matcher):
1262 1264 '''Walk dirstate with matcher, looking for files that addremove would care
1263 1265 about.
1264 1266
1265 1267 This is different from dirstate.status because it doesn't care about
1266 1268 whether files are modified or clean.'''
1267 1269 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1268 1270 audit_path = pathutil.pathauditor(repo.root, cached=True)
1269 1271
1270 1272 ctx = repo[None]
1271 1273 dirstate = repo.dirstate
1272 1274 matcher = repo.narrowmatch(matcher, includeexact=True)
1273 1275 walkresults = dirstate.walk(
1274 1276 matcher,
1275 1277 subrepos=sorted(ctx.substate),
1276 1278 unknown=True,
1277 1279 ignored=False,
1278 1280 full=False,
1279 1281 )
1280 1282 for abs, st in pycompat.iteritems(walkresults):
1281 1283 dstate = dirstate[abs]
1282 1284 if dstate == b'?' and audit_path.check(abs):
1283 1285 unknown.append(abs)
1284 1286 elif dstate != b'r' and not st:
1285 1287 deleted.append(abs)
1286 1288 elif dstate == b'r' and st:
1287 1289 forgotten.append(abs)
1288 1290 # for finding renames
1289 1291 elif dstate == b'r' and not st:
1290 1292 removed.append(abs)
1291 1293 elif dstate == b'a':
1292 1294 added.append(abs)
1293 1295
1294 1296 return added, unknown, deleted, removed, forgotten
1295 1297
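# Hedged summary of the classification above ('?', 'r', 'a' are dirstate
# states; "on disk" reflects the stat result 'st' from the walk):
#   '?' and the path passes the audit   -> unknown
#   not 'r' and missing on disk         -> deleted
#   'r' and present on disk             -> forgotten
#   'r' and missing on disk             -> removed
#   'a'                                 -> added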
1296 1298
1297 1299 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1298 1300 '''Find renames from removed files to added ones.'''
1299 1301 renames = {}
1300 1302 if similarity > 0:
1301 1303 for old, new, score in similar.findrenames(
1302 1304 repo, added, removed, similarity
1303 1305 ):
1304 1306 if (
1305 1307 repo.ui.verbose
1306 1308 or not matcher.exact(old)
1307 1309 or not matcher.exact(new)
1308 1310 ):
1309 1311 repo.ui.status(
1310 1312 _(
1311 1313 b'recording removal of %s as rename to %s '
1312 1314 b'(%d%% similar)\n'
1313 1315 )
1314 1316 % (uipathfn(old), uipathfn(new), score * 100)
1315 1317 )
1316 1318 renames[new] = old
1317 1319 return renames
1318 1320
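# Illustrative sketch (file names and threshold are hypothetical): with
# similarity=0.8, a removed file b'a.txt' and an added file b'b.txt' that
# similar.findrenames() scores at >= 80% would yield {b'b.txt': b'a.txt'}.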
1319 1321
1320 1322 def _markchanges(repo, unknown, deleted, renames):
1321 1323 '''Marks the files in unknown as added, the files in deleted as removed,
1322 1324 and the files in renames as copied.'''
1323 1325 wctx = repo[None]
1324 1326 with repo.wlock():
1325 1327 wctx.forget(deleted)
1326 1328 wctx.add(unknown)
1327 1329 for new, old in pycompat.iteritems(renames):
1328 1330 wctx.copy(old, new)
1329 1331
1330 1332
1331 1333 def getrenamedfn(repo, endrev=None):
1332 1334 if copiesmod.usechangesetcentricalgo(repo):
1333 1335
1334 1336 def getrenamed(fn, rev):
1335 1337 ctx = repo[rev]
1336 1338 p1copies = ctx.p1copies()
1337 1339 if fn in p1copies:
1338 1340 return p1copies[fn]
1339 1341 p2copies = ctx.p2copies()
1340 1342 if fn in p2copies:
1341 1343 return p2copies[fn]
1342 1344 return None
1343 1345
1344 1346 return getrenamed
1345 1347
1346 1348 rcache = {}
1347 1349 if endrev is None:
1348 1350 endrev = len(repo)
1349 1351
1350 1352 def getrenamed(fn, rev):
1351 1353 '''looks up all renames for a file (up to endrev) the first
1352 1354 time the file is requested. It indexes on the changerev and only
1353 1355 parses the manifest if linkrev != changerev.
1354 1356 Returns rename info for fn at changerev rev.'''
1355 1357 if fn not in rcache:
1356 1358 rcache[fn] = {}
1357 1359 fl = repo.file(fn)
1358 1360 for i in fl:
1359 1361 lr = fl.linkrev(i)
1360 1362 renamed = fl.renamed(fl.node(i))
1361 1363 rcache[fn][lr] = renamed and renamed[0]
1362 1364 if lr >= endrev:
1363 1365 break
1364 1366 if rev in rcache[fn]:
1365 1367 return rcache[fn][rev]
1366 1368
1367 1369 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1368 1370 # filectx logic.
1369 1371 try:
1370 1372 return repo[rev][fn].copysource()
1371 1373 except error.LookupError:
1372 1374 return None
1373 1375
1374 1376 return getrenamed
1375 1377
1376 1378
1377 1379 def getcopiesfn(repo, endrev=None):
1378 1380 if copiesmod.usechangesetcentricalgo(repo):
1379 1381
1380 1382 def copiesfn(ctx):
1381 1383 if ctx.p2copies():
1382 1384 allcopies = ctx.p1copies().copy()
1383 1385 # There should be no overlap
1384 1386 allcopies.update(ctx.p2copies())
1385 1387 return sorted(allcopies.items())
1386 1388 else:
1387 1389 return sorted(ctx.p1copies().items())
1388 1390
1389 1391 else:
1390 1392 getrenamed = getrenamedfn(repo, endrev)
1391 1393
1392 1394 def copiesfn(ctx):
1393 1395 copies = []
1394 1396 for fn in ctx.files():
1395 1397 rename = getrenamed(fn, ctx.rev())
1396 1398 if rename:
1397 1399 copies.append((fn, rename))
1398 1400 return copies
1399 1401
1400 1402 return copiesfn
1401 1403
1402 1404
1403 1405 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1404 1406 """Update the dirstate to reflect the intent of copying src to dst. For
1405 1407 different reasons it might not end with dst being marked as copied from src.
1406 1408 """
1407 1409 origsrc = repo.dirstate.copied(src) or src
1408 1410 if dst == origsrc: # copying back a copy?
1409 1411 if repo.dirstate[dst] not in b'mn' and not dryrun:
1410 1412 repo.dirstate.normallookup(dst)
1411 1413 else:
1412 1414 if repo.dirstate[origsrc] == b'a' and origsrc == src:
1413 1415 if not ui.quiet:
1414 1416 ui.warn(
1415 1417 _(
1416 1418 b"%s has not been committed yet, so no copy "
1417 1419 b"data will be stored for %s.\n"
1418 1420 )
1419 1421 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1420 1422 )
1421 1423 if repo.dirstate[dst] in b'?r' and not dryrun:
1422 1424 wctx.add([dst])
1423 1425 elif not dryrun:
1424 1426 wctx.copy(origsrc, dst)
1425 1427
1426 1428
1427 1429 def movedirstate(repo, newctx, match=None):
1428 1430 """Move the dirstate to newctx and adjust it as necessary.
1429 1431
1430 1432 A matcher can be provided as an optimization. It is probably a bug to pass
1431 1433 a matcher that doesn't match all the differences between the parent of the
1432 1434 working copy and newctx.
1433 1435 """
1434 1436 oldctx = repo[b'.']
1435 1437 ds = repo.dirstate
1436 1438 copies = dict(ds.copies())
1437 1439 ds.setparents(newctx.node(), nullid)
1438 1440 s = newctx.status(oldctx, match=match)
1439 1441 for f in s.modified:
1440 1442 if ds[f] == b'r':
1441 1443 # modified + removed -> removed
1442 1444 continue
1443 1445 ds.normallookup(f)
1444 1446
1445 1447 for f in s.added:
1446 1448 if ds[f] == b'r':
1447 1449 # added + removed -> unknown
1448 1450 ds.drop(f)
1449 1451 elif ds[f] != b'a':
1450 1452 ds.add(f)
1451 1453
1452 1454 for f in s.removed:
1453 1455 if ds[f] == b'a':
1454 1456 # removed + added -> normal
1455 1457 ds.normallookup(f)
1456 1458 elif ds[f] != b'r':
1457 1459 ds.remove(f)
1458 1460
1459 1461 # Merge old parent and old working dir copies
1460 1462 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1461 1463 oldcopies.update(copies)
1462 1464 copies = {
1463 1465 dst: oldcopies.get(src, src)
1464 1466 for dst, src in pycompat.iteritems(oldcopies)
1465 1467 }
1466 1468 # Adjust the dirstate copies
1467 1469 for dst, src in pycompat.iteritems(copies):
1468 1470 if src not in newctx or dst in newctx or ds[dst] != b'a':
1469 1471 src = None
1470 1472 ds.copy(src, dst)
1471 1473 repo._quick_access_changeid_invalidate()
1472 1474
1473 1475
1474 1476 def filterrequirements(requirements):
1475 1477 """ filters the requirements into two sets:
1476 1478
1477 1479 wcreq: requirements which should be written in .hg/requires
1478 1480 storereq: requirements which should be written in .hg/store/requires
1479 1481
1480 1482 Returns (wcreq, storereq)
1481 1483 """
1482 1484 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1483 1485 wc, store = set(), set()
1484 1486 for r in requirements:
1485 1487 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1486 1488 wc.add(r)
1487 1489 else:
1488 1490 store.add(r)
1489 1491 return wc, store
1490 1492 return requirements, None
1491 1493
1492 1494
1493 1495 def writereporequirements(repo, requirements=None):
1494 1496 """ writes requirements for the repo to .hg/requires """
1495 1497 if requirements:
1496 1498 repo.requirements = requirements
1497 1499 wcreq, storereq = filterrequirements(repo.requirements)
1498 1500 if wcreq is not None:
1499 1501 writerequires(repo.vfs, wcreq)
1500 1502 if storereq is not None:
1501 1503 writerequires(repo.svfs, storereq)
1502 1504
1503 1505
1504 1506 def writerequires(opener, requirements):
1505 1507 with opener(b'requires', b'w', atomictemp=True) as fp:
1506 1508 for r in sorted(requirements):
1507 1509 fp.write(b"%s\n" % r)
1508 1510
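# The resulting file is plain text with one requirement per line, sorted,
# e.g. (illustrative contents):
#   dotencode
#   fncache
#   store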
1509 1511
1510 1512 class filecachesubentry(object):
1511 1513 def __init__(self, path, stat):
1512 1514 self.path = path
1513 1515 self.cachestat = None
1514 1516 self._cacheable = None
1515 1517
1516 1518 if stat:
1517 1519 self.cachestat = filecachesubentry.stat(self.path)
1518 1520
1519 1521 if self.cachestat:
1520 1522 self._cacheable = self.cachestat.cacheable()
1521 1523 else:
1522 1524 # None means we don't know yet
1523 1525 self._cacheable = None
1524 1526
1525 1527 def refresh(self):
1526 1528 if self.cacheable():
1527 1529 self.cachestat = filecachesubentry.stat(self.path)
1528 1530
1529 1531 def cacheable(self):
1530 1532 if self._cacheable is not None:
1531 1533 return self._cacheable
1532 1534
1533 1535 # we don't know yet, assume it is for now
1534 1536 return True
1535 1537
1536 1538 def changed(self):
1537 1539 # no point in going further if we can't cache it
1538 1540 if not self.cacheable():
1539 1541 return True
1540 1542
1541 1543 newstat = filecachesubentry.stat(self.path)
1542 1544
1543 1545 # we may not know if it's cacheable yet, check again now
1544 1546 if newstat and self._cacheable is None:
1545 1547 self._cacheable = newstat.cacheable()
1546 1548
1547 1549 # check again
1548 1550 if not self._cacheable:
1549 1551 return True
1550 1552
1551 1553 if self.cachestat != newstat:
1552 1554 self.cachestat = newstat
1553 1555 return True
1554 1556 else:
1555 1557 return False
1556 1558
1557 1559 @staticmethod
1558 1560 def stat(path):
1559 1561 try:
1560 1562 return util.cachestat(path)
1561 1563 except OSError as e:
1562 1564 if e.errno != errno.ENOENT:
1563 1565 raise
1564 1566
1565 1567
1566 1568 class filecacheentry(object):
1567 1569 def __init__(self, paths, stat=True):
1568 1570 self._entries = []
1569 1571 for path in paths:
1570 1572 self._entries.append(filecachesubentry(path, stat))
1571 1573
1572 1574 def changed(self):
1573 1575 '''true if any entry has changed'''
1574 1576 for entry in self._entries:
1575 1577 if entry.changed():
1576 1578 return True
1577 1579 return False
1578 1580
1579 1581 def refresh(self):
1580 1582 for entry in self._entries:
1581 1583 entry.refresh()
1582 1584
1583 1585
1584 1586 class filecache(object):
1585 1587 """A property like decorator that tracks files under .hg/ for updates.
1586 1588
1587 1589 On first access, the files defined as arguments are stat()ed and the
1588 1590 results cached. The decorated function is called. The results are stashed
1589 1591 away in a ``_filecache`` dict on the object whose method is decorated.
1590 1592
1591 1593 On subsequent access, the cached result is used directly, since it is
1592 1594 set in the instance dictionary.
1593 1595
1594 1596 On external property set/delete operations, the caller must update the
1595 1597 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1596 1598 instead of directly setting <attr>.
1597 1599
1598 1600 When using the property API, the cached data is always used if available.
1599 1601 No stat() is performed to check if the file has changed.
1600 1602
1601 1603 Others can muck about with the state of the ``_filecache`` dict, e.g. they
1602 1604 can populate an entry before the property's getter is called. In this case,
1603 1605 entries in ``_filecache`` will be used during property operations,
1604 1606 if available. If the underlying file changes, it is up to external callers
1605 1607 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1606 1608 method result as well as possibly calling ``del obj._filecache[attr]`` to
1607 1609 remove the ``filecacheentry``.
1608 1610 """
1609 1611
1610 1612 def __init__(self, *paths):
1611 1613 self.paths = paths
1612 1614
1613 1615 def join(self, obj, fname):
1614 1616 """Used to compute the runtime path of a cached file.
1615 1617
1616 1618 Users should subclass filecache and provide their own version of this
1617 1619 function to call the appropriate join function on 'obj' (an instance
1618 1620 of the class whose member function was decorated).
1619 1621 """
1620 1622 raise NotImplementedError
1621 1623
1622 1624 def __call__(self, func):
1623 1625 self.func = func
1624 1626 self.sname = func.__name__
1625 1627 self.name = pycompat.sysbytes(self.sname)
1626 1628 return self
1627 1629
1628 1630 def __get__(self, obj, type=None):
1629 1631 # if accessed on the class, return the descriptor itself.
1630 1632 if obj is None:
1631 1633 return self
1632 1634
1633 1635 assert self.sname not in obj.__dict__
1634 1636
1635 1637 entry = obj._filecache.get(self.name)
1636 1638
1637 1639 if entry:
1638 1640 if entry.changed():
1639 1641 entry.obj = self.func(obj)
1640 1642 else:
1641 1643 paths = [self.join(obj, path) for path in self.paths]
1642 1644
1643 1645 # We stat -before- creating the object so our cache doesn't lie if
1644 1646 # a writer modified the file between the time we read and stat it
1645 1647 entry = filecacheentry(paths, True)
1646 1648 entry.obj = self.func(obj)
1647 1649
1648 1650 obj._filecache[self.name] = entry
1649 1651
1650 1652 obj.__dict__[self.sname] = entry.obj
1651 1653 return entry.obj
1652 1654
1653 1655 # don't implement __set__(), which would make __dict__ lookup as slow as
1654 1656 # a function call.
1655 1657
1656 1658 def set(self, obj, value):
1657 1659 if self.name not in obj._filecache:
1658 1660 # we add an entry for the missing value because X in __dict__
1659 1661 # implies X in _filecache
1660 1662 paths = [self.join(obj, path) for path in self.paths]
1661 1663 ce = filecacheentry(paths, False)
1662 1664 obj._filecache[self.name] = ce
1663 1665 else:
1664 1666 ce = obj._filecache[self.name]
1665 1667
1666 1668 ce.obj = value # update cached copy
1667 1669 obj.__dict__[self.sname] = value # update copy returned by obj.x
1668 1670
1669 1671
1670 1672 def extdatasource(repo, source):
1671 1673 """Gather a map of rev -> value dict from the specified source
1672 1674
1673 1675 A source spec is treated as a URL, with a special case shell: type
1674 1676 for parsing the output from a shell command.
1675 1677
1676 1678 The data is parsed as a series of newline-separated records where
1677 1679 each record is a revision specifier optionally followed by a space
1678 1680 and a freeform string value. If the revision is known locally, it
1679 1681 is converted to a rev, otherwise the record is skipped.
1680 1682
1681 1683 Note that both key and value are treated as UTF-8 and converted to
1682 1684 the local encoding. This allows uniformity between local and
1683 1685 remote data sources.
1684 1686 """
1685 1687
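# Example configuration and record format (hypothetical values):
#   [extdata]
#   bugzilla = shell:cat .hg/bugzilla-status
# where each output line looks like "f30bd1fabb21 fixed in 4.2", i.e. a
# revision specifier, a space, and a free-form value.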
1686 1688 spec = repo.ui.config(b"extdata", source)
1687 1689 if not spec:
1688 1690 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1689 1691
1690 1692 data = {}
1691 1693 src = proc = None
1692 1694 try:
1693 1695 if spec.startswith(b"shell:"):
1694 1696 # external commands should be run relative to the repo root
1695 1697 cmd = spec[6:]
1696 1698 proc = subprocess.Popen(
1697 1699 procutil.tonativestr(cmd),
1698 1700 shell=True,
1699 1701 bufsize=-1,
1700 1702 close_fds=procutil.closefds,
1701 1703 stdout=subprocess.PIPE,
1702 1704 cwd=procutil.tonativestr(repo.root),
1703 1705 )
1704 1706 src = proc.stdout
1705 1707 else:
1706 1708 # treat as a URL or file
1707 1709 src = url.open(repo.ui, spec)
1708 1710 for l in src:
1709 1711 if b" " in l:
1710 1712 k, v = l.strip().split(b" ", 1)
1711 1713 else:
1712 1714 k, v = l.strip(), b""
1713 1715
1714 1716 k = encoding.tolocal(k)
1715 1717 try:
1716 1718 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1717 1719 except (error.LookupError, error.RepoLookupError):
1718 1720 pass # we ignore data for nodes that don't exist locally
1719 1721 finally:
1720 1722 if proc:
1721 1723 try:
1722 1724 proc.communicate()
1723 1725 except ValueError:
1724 1726 # This happens if we started iterating src and then
1725 1727 # got a parse error on a line. It should be safe to ignore.
1726 1728 pass
1727 1729 if src:
1728 1730 src.close()
1729 1731 if proc and proc.returncode != 0:
1730 1732 raise error.Abort(
1731 1733 _(b"extdata command '%s' failed: %s")
1732 1734 % (cmd, procutil.explainexit(proc.returncode))
1733 1735 )
1734 1736
1735 1737 return data
1736 1738
1737 1739
1738 1740 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1739 1741 if lock is None:
1740 1742 raise error.LockInheritanceContractViolation(
1741 1743 b'lock can only be inherited while held'
1742 1744 )
1743 1745 if environ is None:
1744 1746 environ = {}
1745 1747 with lock.inherit() as locker:
1746 1748 environ[envvar] = locker
1747 1749 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1748 1750
1749 1751
1750 1752 def wlocksub(repo, cmd, *args, **kwargs):
1751 1753 """run cmd as a subprocess that allows inheriting repo's wlock
1752 1754
1753 1755 This can only be called while the wlock is held. This takes all the
1754 1756 arguments that ui.system does, and returns the exit code of the
1755 1757 subprocess."""
1756 1758 return _locksub(
1757 1759 repo, repo.currentwlock(), b'HG_WLOCK_LOCKER', cmd, *args, **kwargs
1758 1760 )
1759 1761
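# Hedged usage sketch (the command string is illustrative):
#   with repo.wlock():
#       rc = wlocksub(repo, b'hg debuglocks')
# The child process finds the locker token in HG_WLOCK_LOCKER and can
# reuse the inherited wlock instead of deadlocking on it.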
1760 1762
1761 1763 class progress(object):
1762 1764 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1763 1765 self.ui = ui
1764 1766 self.pos = 0
1765 1767 self.topic = topic
1766 1768 self.unit = unit
1767 1769 self.total = total
1768 1770 self.debug = ui.configbool(b'progress', b'debug')
1769 1771 self._updatebar = updatebar
1770 1772
1771 1773 def __enter__(self):
1772 1774 return self
1773 1775
1774 1776 def __exit__(self, exc_type, exc_value, exc_tb):
1775 1777 self.complete()
1776 1778
1777 1779 def update(self, pos, item=b"", total=None):
1778 1780 assert pos is not None
1779 1781 if total:
1780 1782 self.total = total
1781 1783 self.pos = pos
1782 1784 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1783 1785 if self.debug:
1784 1786 self._printdebug(item)
1785 1787
1786 1788 def increment(self, step=1, item=b"", total=None):
1787 1789 self.update(self.pos + step, item, total)
1788 1790
1789 1791 def complete(self):
1790 1792 self.pos = None
1791 1793 self.unit = b""
1792 1794 self.total = None
1793 1795 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1794 1796
1795 1797 def _printdebug(self, item):
1796 1798 unit = b''
1797 1799 if self.unit:
1798 1800 unit = b' ' + self.unit
1799 1801 if item:
1800 1802 item = b' ' + item
1801 1803
1802 1804 if self.total:
1803 1805 pct = 100.0 * self.pos / self.total
1804 1806 self.ui.debug(
1805 1807 b'%s:%s %d/%d%s (%4.2f%%)\n'
1806 1808 % (self.topic, item, self.pos, self.total, unit, pct)
1807 1809 )
1808 1810 else:
1809 1811 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1810 1812
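# Hedged usage sketch (assumes a suitable 'updatebar' callable, e.g. one
# provided by the ui's progress machinery):
#   with progress(ui, updatebar, b'files', unit=b'files', total=3) as p:
#       for f in (b'a', b'b', b'c'):
#           p.increment(item=f)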
1811 1813
1812 1814 def gdinitconfig(ui):
1813 1815 """helper function to know if a repo should be created as general delta
1814 1816 """
1815 1817 # experimental config: format.generaldelta
1816 1818 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1817 1819 b'format', b'usegeneraldelta'
1818 1820 )
1819 1821
1820 1822
1821 1823 def gddeltaconfig(ui):
1822 1824 """helper function to know if incoming delta should be optimised
1823 1825 """
1824 1826 # experimental config: format.generaldelta
1825 1827 return ui.configbool(b'format', b'generaldelta')
1826 1828
1827 1829
1828 1830 class simplekeyvaluefile(object):
1829 1831 """A simple file with key=value lines
1830 1832
1831 1833 Keys must be alphanumeric and start with a letter; values must not
1832 1834 contain '\n' characters"""
1833 1835
1834 1836 firstlinekey = b'__firstline'
1835 1837
1836 1838 def __init__(self, vfs, path, keys=None):
1837 1839 self.vfs = vfs
1838 1840 self.path = path
1839 1841
1840 1842 def read(self, firstlinenonkeyval=False):
1841 1843 """Read the contents of a simple key-value file
1842 1844
1843 1845 'firstlinenonkeyval' indicates whether the first line of the file
1844 1846 should be treated as a key-value pair or returned fully under the
1845 1847 __firstline key."""
1846 1848 lines = self.vfs.readlines(self.path)
1847 1849 d = {}
1848 1850 if firstlinenonkeyval:
1849 1851 if not lines:
1850 1852 e = _(b"empty simplekeyvalue file")
1851 1853 raise error.CorruptedState(e)
1852 1854 # we don't want to include '\n' in the __firstline
1853 1855 d[self.firstlinekey] = lines[0][:-1]
1854 1856 del lines[0]
1855 1857
1856 1858 try:
1857 1859 # the 'if line.strip()' part prevents us from failing on empty
1858 1860 # lines which only contain '\n' and are therefore not skipped
1859 1861 # by 'if line'
1860 1862 updatedict = dict(
1861 1863 line[:-1].split(b'=', 1) for line in lines if line.strip()
1862 1864 )
1863 1865 if self.firstlinekey in updatedict:
1864 1866 e = _(b"%r can't be used as a key")
1865 1867 raise error.CorruptedState(e % self.firstlinekey)
1866 1868 d.update(updatedict)
1867 1869 except ValueError as e:
1868 1870 raise error.CorruptedState(stringutil.forcebytestr(e))
1869 1871 return d
1870 1872
1871 1873 def write(self, data, firstline=None):
1872 1874 """Write key=>value mapping to a file
1873 1875 data is a dict. Keys must be alphanumeric and start with a letter.
1874 1876 Values must not contain newline characters.
1875 1877
1876 1878 If 'firstline' is not None, it is written to the file before
1877 1879 everything else, as-is, not in key=value form
1878 1880 lines = []
1879 1881 if firstline is not None:
1880 1882 lines.append(b'%s\n' % firstline)
1881 1883
1882 1884 for k, v in data.items():
1883 1885 if k == self.firstlinekey:
1884 1886 e = b"key name '%s' is reserved" % self.firstlinekey
1885 1887 raise error.ProgrammingError(e)
1886 1888 if not k[0:1].isalpha():
1887 1889 e = b"keys must start with a letter in a key-value file"
1888 1890 raise error.ProgrammingError(e)
1889 1891 if not k.isalnum():
1890 1892 e = b"invalid key name in a simple key-value file"
1891 1893 raise error.ProgrammingError(e)
1892 1894 if b'\n' in v:
1893 1895 e = b"invalid value in a simple key-value file"
1894 1896 raise error.ProgrammingError(e)
1895 1897 lines.append(b"%s=%s\n" % (k, v))
1896 1898 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1897 1899 fp.write(b''.join(lines))
1898 1900
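# Hedged usage sketch (the vfs and path are hypothetical):
#   f = simplekeyvaluefile(repo.vfs, b'examplestate')
#   f.write({b'version': b'1'}, firstline=b'examplestate-v1')
#   f.read(firstlinenonkeyval=True)
#   -> {b'__firstline': b'examplestate-v1', b'version': b'1'}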
1899 1901
1900 1902 _reportobsoletedsource = [
1901 1903 b'debugobsolete',
1902 1904 b'pull',
1903 1905 b'push',
1904 1906 b'serve',
1905 1907 b'unbundle',
1906 1908 ]
1907 1909
1908 1910 _reportnewcssource = [
1909 1911 b'pull',
1910 1912 b'unbundle',
1911 1913 ]
1912 1914
1913 1915
1914 1916 def prefetchfiles(repo, revmatches):
1915 1917 """Invokes the registered file prefetch functions, allowing extensions to
1916 1918 ensure the corresponding files are available locally, before the command
1917 1919 uses them.
1918 1920
1919 1921 Args:
1920 1922 revmatches: a list of (revision, match) tuples to indicate the files to
1921 1923 fetch at each revision. If any of the match elements is None, it matches
1922 1924 all files.
1923 1925 """
1924 1926
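# Illustrative call (the revision is hypothetical; a None matcher means
# all files at that revision):
#   prefetchfiles(repo, [(repo[b'tip'].rev(), None)])
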
1925 1927 def _matcher(m):
1926 1928 if m:
1927 1929 assert isinstance(m, matchmod.basematcher)
1928 1930 # The command itself will complain about files that don't exist, so
1929 1931 # don't duplicate the message.
1930 1932 return matchmod.badmatch(m, lambda fn, msg: None)
1931 1933 else:
1932 1934 return matchall(repo)
1933 1935
1934 1936 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1935 1937
1936 1938 fileprefetchhooks(repo, revbadmatches)
1937 1939
1938 1940
1939 1941 # a list of (repo, revmatches) prefetch functions
1940 1942 fileprefetchhooks = util.hooks()
1941 1943
1942 1944 # A marker that tells the evolve extension to suppress its own reporting
1943 1945 _reportstroubledchangesets = True
1944 1946
1945 1947
1946 1948 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1947 1949 """register a callback to issue a summary after the transaction is closed
1948 1950
1949 1951 If as_validator is true, then the callbacks are registered as transaction
1950 1952 validators instead
1951 1953 """
1952 1954
1953 1955 def txmatch(sources):
1954 1956 return any(txnname.startswith(source) for source in sources)
1955 1957
1956 1958 categories = []
1957 1959
1958 1960 def reportsummary(func):
1959 1961 """decorator for report callbacks."""
1960 1962 # The repoview life cycle is shorter than the one of the actual
1961 1963 # underlying repository. So the filtered object can die before the
1962 1964 # weakref is used leading to troubles. We keep a reference to the
1963 1965 # unfiltered object and restore the filtering when retrieving the
1964 1966 # repository through the weakref.
1965 1967 filtername = repo.filtername
1966 1968 reporef = weakref.ref(repo.unfiltered())
1967 1969
1968 1970 def wrapped(tr):
1969 1971 repo = reporef()
1970 1972 if filtername:
1971 1973 assert repo is not None # help pytype
1972 1974 repo = repo.filtered(filtername)
1973 1975 func(repo, tr)
1974 1976
1975 1977 newcat = b'%02i-txnreport' % len(categories)
1976 1978 if as_validator:
1977 1979 otr.addvalidator(newcat, wrapped)
1978 1980 else:
1979 1981 otr.addpostclose(newcat, wrapped)
1980 1982 categories.append(newcat)
1981 1983 return wrapped
1982 1984
1983 1985 @reportsummary
1984 1986 def reportchangegroup(repo, tr):
1985 1987 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
1986 1988 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
1987 1989 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
1988 1990 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
1989 1991 if cgchangesets or cgrevisions or cgfiles:
1990 1992 htext = b""
1991 1993 if cgheads:
1992 1994 htext = _(b" (%+d heads)") % cgheads
1993 1995 msg = _(b"added %d changesets with %d changes to %d files%s\n")
1994 1996 if as_validator:
1995 1997 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
1996 1998 assert repo is not None # help pytype
1997 1999 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
1998 2000
1999 2001 if txmatch(_reportobsoletedsource):
2000 2002
2001 2003 @reportsummary
2002 2004 def reportobsoleted(repo, tr):
2003 2005 obsoleted = obsutil.getobsoleted(repo, tr)
2004 2006 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2005 2007 if newmarkers:
2006 2008 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2007 2009 if obsoleted:
2008 2010 msg = _(b'obsoleted %i changesets\n')
2009 2011 if as_validator:
2010 2012 msg = _(b'obsoleting %i changesets\n')
2011 2013 repo.ui.status(msg % len(obsoleted))
2012 2014
2013 2015 if obsolete.isenabled(
2014 2016 repo, obsolete.createmarkersopt
2015 2017 ) and repo.ui.configbool(
2016 2018 b'experimental', b'evolution.report-instabilities'
2017 2019 ):
2018 2020 instabilitytypes = [
2019 2021 (b'orphan', b'orphan'),
2020 2022 (b'phase-divergent', b'phasedivergent'),
2021 2023 (b'content-divergent', b'contentdivergent'),
2022 2024 ]
2023 2025
2024 2026 def getinstabilitycounts(repo):
2025 2027 filtered = repo.changelog.filteredrevs
2026 2028 counts = {}
2027 2029 for instability, revset in instabilitytypes:
2028 2030 counts[instability] = len(
2029 2031 set(obsolete.getrevs(repo, revset)) - filtered
2030 2032 )
2031 2033 return counts
2032 2034
2033 2035 oldinstabilitycounts = getinstabilitycounts(repo)
2034 2036
2035 2037 @reportsummary
2036 2038 def reportnewinstabilities(repo, tr):
2037 2039 newinstabilitycounts = getinstabilitycounts(repo)
2038 2040 for instability, revset in instabilitytypes:
2039 2041 delta = (
2040 2042 newinstabilitycounts[instability]
2041 2043 - oldinstabilitycounts[instability]
2042 2044 )
2043 2045 msg = getinstabilitymessage(delta, instability)
2044 2046 if msg:
2045 2047 repo.ui.warn(msg)
2046 2048
2047 2049 if txmatch(_reportnewcssource):
2048 2050
2049 2051 @reportsummary
2050 2052 def reportnewcs(repo, tr):
2051 2053 """Report the range of new revisions pulled/unbundled."""
2052 2054 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2053 2055 unfi = repo.unfiltered()
2054 2056 if origrepolen >= len(unfi):
2055 2057 return
2056 2058
2057 2059 # Compute the bounds of new visible revisions' range.
2058 2060 revs = smartset.spanset(repo, start=origrepolen)
2059 2061 if revs:
2060 2062 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2061 2063
2062 2064 if minrev == maxrev:
2063 2065 revrange = minrev
2064 2066 else:
2065 2067 revrange = b'%s:%s' % (minrev, maxrev)
2066 2068 draft = len(repo.revs(b'%ld and draft()', revs))
2067 2069 secret = len(repo.revs(b'%ld and secret()', revs))
2068 2070 if not (draft or secret):
2069 2071 msg = _(b'new changesets %s\n') % revrange
2070 2072 elif draft and secret:
2071 2073 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2072 2074 msg %= (revrange, draft, secret)
2073 2075 elif draft:
2074 2076 msg = _(b'new changesets %s (%d drafts)\n')
2075 2077 msg %= (revrange, draft)
2076 2078 elif secret:
2077 2079 msg = _(b'new changesets %s (%d secrets)\n')
2078 2080 msg %= (revrange, secret)
2079 2081 else:
2080 2082 errormsg = b'entered unreachable condition'
2081 2083 raise error.ProgrammingError(errormsg)
2082 2084 repo.ui.status(msg)
2083 2085
2084 2086 # search new changesets directly pulled as obsolete
2085 2087 duplicates = tr.changes.get(b'revduplicates', ())
2086 2088 obsadded = unfi.revs(
2087 2089 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2088 2090 )
2089 2091 cl = repo.changelog
2090 2092 extinctadded = [r for r in obsadded if r not in cl]
2091 2093 if extinctadded:
2092 2094 # They are not just obsolete, but obsolete and invisible;
2093 2095 # we call them "extinct" internally, but the term has not been
2094 2096 # exposed to users.
2095 2097 msg = b'(%d other changesets obsolete on arrival)\n'
2096 2098 repo.ui.status(msg % len(extinctadded))
2097 2099
2098 2100 @reportsummary
2099 2101 def reportphasechanges(repo, tr):
2100 2102 """Report statistics of phase changes for changesets pre-existing
2101 2103 pull/unbundle.
2102 2104 """
2103 2105 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2104 2106 published = []
2105 2107 for revs, (old, new) in tr.changes.get(b'phases', []):
2106 2108 if new != phases.public:
2107 2109 continue
2108 2110 published.extend(rev for rev in revs if rev < origrepolen)
2109 2111 if not published:
2110 2112 return
2111 2113 msg = _(b'%d local changesets published\n')
2112 2114 if as_validator:
2113 2115 msg = _(b'%d local changesets will be published\n')
2114 2116 repo.ui.status(msg % len(published))
2115 2117
2116 2118
2117 2119 def getinstabilitymessage(delta, instability):
2118 2120 """function to return the message to show warning about new instabilities
2119 2121
2120 2122 exists as a separate function so that extension can wrap to show more
2121 2123 information like how to fix instabilities"""
2122 2124 if delta > 0:
2123 2125 return _(b'%i new %s changesets\n') % (delta, instability)
2124 2126
2125 2127
2126 2128 def nodesummaries(repo, nodes, maxnumnodes=4):
2127 2129 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2128 2130 return b' '.join(short(h) for h in nodes)
2129 2131 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2130 2132 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2131 2133
2132 2134
2133 2135 def enforcesinglehead(repo, tr, desc, accountclosed=False):
2134 2136 """check that no named branch has multiple heads"""
2135 2137 if desc in (b'strip', b'repair'):
2136 2138 # skip the logic during strip
2137 2139 return
2138 2140 visible = repo.filtered(b'visible')
2139 2141 # possible improvement: we could restrict the check to the affected branches
2140 2142 bm = visible.branchmap()
2141 2143 for name in bm:
2142 2144 heads = bm.branchheads(name, closed=accountclosed)
2143 2145 if len(heads) > 1:
2144 2146 msg = _(b'rejecting multiple heads on branch "%s"')
2145 2147 msg %= name
2146 2148 hint = _(b'%d heads: %s')
2147 2149 hint %= (len(heads), nodesummaries(repo, heads))
2148 2150 raise error.Abort(msg, hint=hint)
2149 2151
2150 2152
2151 2153 def wrapconvertsink(sink):
2152 2154 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2153 2155 before it is used, whether or not the convert extension was formally loaded.
2154 2156 """
2155 2157 return sink
2156 2158
2157 2159
2158 2160 def unhidehashlikerevs(repo, specs, hiddentype):
2159 2161 """parse the user specs and unhide changesets whose hash or revision number
2160 2162 is passed.
2161 2163
2162 2164 hiddentype can be: 1) 'warn': warn while unhiding changesets
2163 2165 2) 'nowarn': don't warn while unhiding changesets
2164 2166
2165 2167 returns a repo object with the required changesets unhidden
2166 2168 """
2167 2169 if not repo.filtername or not repo.ui.configbool(
2168 2170 b'experimental', b'directaccess'
2169 2171 ):
2170 2172 return repo
2171 2173
2172 2174 if repo.filtername not in (b'visible', b'visible-hidden'):
2173 2175 return repo
2174 2176
2175 2177 symbols = set()
2176 2178 for spec in specs:
2177 2179 try:
2178 2180 tree = revsetlang.parse(spec)
2179 2181 except error.ParseError: # will be reported by scmutil.revrange()
2180 2182 continue
2181 2183
2182 2184 symbols.update(revsetlang.gethashlikesymbols(tree))
2183 2185
2184 2186 if not symbols:
2185 2187 return repo
2186 2188
2187 2189 revs = _getrevsfromsymbols(repo, symbols)
2188 2190
2189 2191 if not revs:
2190 2192 return repo
2191 2193
2192 2194 if hiddentype == b'warn':
2193 2195 unfi = repo.unfiltered()
2194 2196 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2195 2197 repo.ui.warn(
2196 2198 _(
2197 2199 b"warning: accessing hidden changesets for write "
2198 2200 b"operation: %s\n"
2199 2201 )
2200 2202 % revstr
2201 2203 )
2202 2204
2203 2205 # we have to use a new filtername to separate branch/tags caches until we
2204 2206 # can disable these caches when revisions are dynamically pinned.
2205 2207 return repo.filtered(b'visible-hidden', revs)
2206 2208
2207 2209
2208 2210 def _getrevsfromsymbols(repo, symbols):
2209 2211 """parse the list of symbols and returns a set of revision numbers of hidden
2210 2212 changesets present in symbols"""
2211 2213 revs = set()
2212 2214 unfi = repo.unfiltered()
2213 2215 unficl = unfi.changelog
2214 2216 cl = repo.changelog
2215 2217 tiprev = len(unficl)
2216 2218 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2217 2219 for s in symbols:
2218 2220 try:
2219 2221 n = int(s)
2220 2222 if n <= tiprev:
2221 2223 if not allowrevnums:
2222 2224 continue
2223 2225 else:
2224 2226 if n not in cl:
2225 2227 revs.add(n)
2226 2228 continue
2227 2229 except ValueError:
2228 2230 pass
2229 2231
2230 2232 try:
2231 2233 s = resolvehexnodeidprefix(unfi, s)
2232 2234 except (error.LookupError, error.WdirUnsupported):
2233 2235 s = None
2234 2236
2235 2237 if s is not None:
2236 2238 rev = unficl.rev(s)
2237 2239 if rev not in cl:
2238 2240 revs.add(rev)
2239 2241
2240 2242 return revs
2241 2243
2242 2244
2243 2245 def bookmarkrevs(repo, mark):
2244 2246 """
2245 2247 Select revisions reachable by a given bookmark
2246 2248 """
2247 2249 return repo.revs(
2248 2250 b"ancestors(bookmark(%s)) - "
2249 2251 b"ancestors(head() and not bookmark(%s)) - "
2250 2252 b"ancestors(bookmark() and not bookmark(%s))",
2251 2253 mark,
2252 2254 mark,
2253 2255 mark,
2254 2256 )
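
# Hedged reading of the revset above: for a hypothetical bookmark
# b'feature', this selects the ancestors of the bookmarked changeset,
# minus ancestors of heads that do not carry the bookmark, minus
# ancestors of the other bookmarks, i.e. the changesets "belonging"
# to b'feature'.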