##// END OF EJS Templates
revset: extract a parsefollowlinespattern helper function...
Denis Laxalde -
r34855:39b094e4 default
parent child Browse files
Show More
@@ -1,2232 +1,2225
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 obsutil,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 revsetlang,
28 28 scmutil,
29 29 smartset,
30 30 util,
31 31 )
32 32
# helpers for processing parsed tree
# (re-exported from revsetlang so operator/predicate code below can use
# short local names)
getsymbol = revsetlang.getsymbol
getstring = revsetlang.getstring
getinteger = revsetlang.getinteger
getboolean = revsetlang.getboolean
getlist = revsetlang.getlist
getrange = revsetlang.getrange
getargs = revsetlang.getargs
getargsdict = revsetlang.getargsdict

# smartset types, aliased for brevity
baseset = smartset.baseset
generatorset = smartset.generatorset
spanset = smartset.spanset
fullreposet = smartset.fullreposet

# Constants for ordering requirement, used in getset():
#
# If 'define', any nested functions and operations MAY change the ordering of
# the entries in the set (but if changes the ordering, it MUST ALWAYS change
# it). If 'follow', any nested functions and operations MUST take the ordering
# specified by the first operand to the '&' operator.
#
# For instance,
#
#   X & (Y | Z)
#   ^   ^^^^^^^
#   |   follow
#   define
#
# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
#
# 'any' means the order doesn't matter. For instance,
#
#   (X & !Y) | ancestors(Z)
#        ^              ^
#        any            any
#
# For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
# order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
# since 'ancestors' does not care about the order of its argument.
#
# Currently, most revsets do not care about the order, so 'define' is
# equivalent to 'follow' for them, and the resulting order is based on the
# 'subset' parameter passed down to them:
#
#   m = revset.match(...)
#   m(repo, subset, order=defineorder)
#           ^^^^^^
#      For most revsets, 'define' means using the order this subset provides
#
# There are a few revsets that always redefine the order if 'define' is
# specified: 'sort(X)', 'reverse(X)', 'x:y'.
anyorder = 'any'        # don't care the order, could be even random-shuffled
defineorder = 'define'  # ALWAYS redefine, or ALWAYS follow the current order
followorder = 'follow'  # MUST follow the current order
90 90 # helpers
91 91
def getset(repo, subset, x, order=defineorder):
    """Evaluate the parsed tree x against subset by dispatching on the
    operator name in x[0]; order carries the ordering requirement down."""
    if not x:
        raise error.ParseError(_("missing argument"))
    op = x[0]
    return methods[op](repo, subset, *x[1:], order=order)
96 96
def _getrevsource(repo, r):
    """Return the revision that r was grafted/transplanted/rebased from,
    or None if no such source is recorded or it can't be resolved."""
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # the recorded source is unknown in this repo; try the next key
            pass
    return None
106 106
107 107 # operator methods
108 108
def stringset(repo, subset, x, order):
    """Resolve a bare string/symbol to a single-revision set."""
    x = scmutil.intrev(repo[x])
    if x in subset:
        return baseset([x])
    # nullrev is a member of the full repo even though no set lists it
    if x == node.nullrev and isinstance(subset, fullreposet):
        return baseset([x])
    return baseset()
115 115
def rangeset(repo, subset, x, y, order):
    """'x:y' -- range between the first rev of x and the last rev of y."""
    rl = fullreposet(repo)
    m = getset(repo, rl, x)
    n = getset(repo, rl, y)
    if m and n:
        return _makerangeset(repo, subset, m.first(), n.last(), order)
    return baseset()
123 123
def rangeall(repo, subset, x, order):
    """':' -- every revision in the repository."""
    assert x is None
    return _makerangeset(repo, subset, 0, len(repo) - 1, order)
127 127
def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    n = getset(repo, fullreposet(repo), y)
    if n:
        return _makerangeset(repo, subset, 0, n.last(), order)
    return baseset()
134 134
def rangepost(repo, subset, x, order):
    """'x:' -- from the first rev of x through the repository tip."""
    m = getset(repo, fullreposet(repo), x)
    if m:
        return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
    return baseset()
140 140
def _makerangeset(repo, subset, m, n, order):
    """Build the inclusive range m..n (descending when m > n) and
    intersect it with subset, honoring the requested ordering.

    The working-directory pseudo-revision (node.wdirrev) lies outside the
    spanset domain, so it is spliced in as a one-element baseset."""
    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)

    if order == defineorder:
        return r & subset
    # carrying the sorting over when possible would be more efficient
    return subset & r
158 158
def dagrange(repo, subset, x, y, order):
    """'x::y' -- DAG range: ancestors of y that are also descendants of x."""
    full = fullreposet(repo)
    roots = getset(repo, full, x)
    heads = getset(repo, full, y)
    xs = dagop.reachableroots(repo, roots, heads, includepath=True)
    return subset & xs
164 164
def andset(repo, subset, x, y, order):
    """'x and y' -- the left operand defines the order; the right follows."""
    yorder = anyorder if order == anyorder else followorder
    return getset(repo, getset(repo, subset, x, order), y, yorder)
171 171
def andsmallyset(repo, subset, x, y, order):
    # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
    yorder = anyorder if order == anyorder else followorder
    # evaluate the small right-hand side first to narrow the subset early
    return getset(repo, getset(repo, subset, y, yorder), x, order)
179 179
def differenceset(repo, subset, x, y, order):
    """'x - y' -- order of the subtracted operand never matters."""
    included = getset(repo, subset, x, order)
    excluded = getset(repo, subset, y, anyorder)
    return included - excluded
182 182
def _orsetlist(repo, subset, xs, order):
    """Union the operand list by balanced divide-and-conquer to keep the
    recursion depth logarithmic in len(xs)."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0], order)
    mid = len(xs) // 2
    left = _orsetlist(repo, subset, xs[:mid], order)
    right = _orsetlist(repo, subset, xs[mid:], order)
    return left + right
191 191
def orset(repo, subset, x, order):
    """'x or y or ...' -- union of all operands."""
    xs = getlist(x)
    if order != followorder:
        return _orsetlist(repo, subset, xs, order)
    # slow path to take the subset order
    return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
199 199
def notset(repo, subset, x, order):
    """'not x' -- subset minus x; x's internal order is irrelevant."""
    excluded = getset(repo, subset, x, anyorder)
    return subset - excluded
202 202
def relationset(repo, subset, x, y, order):
    # a bare 'x#y' is only meaningful with a subscript ('x#y[n]'),
    # which is handled by relsubscriptset instead
    raise error.ParseError(_("can't use a relation in this context"))
205 205
def relsubscriptset(repo, subset, x, y, z, order):
    # this is pretty basic implementation of 'x#y[z]' operator, still
    # experimental so undocumented. see the wiki for further ideas.
    # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
    rel = getsymbol(y)
    n = getinteger(z, _("relation subscript must be an integer"))

    # TODO: perhaps this should be a table of relation functions
    if rel not in ('g', 'generations'):
        raise error.UnknownIdentifier(rel, ['generations'])

    # TODO: support range, rewrite tests, and drop startdepth argument
    # from ancestors() and descendants() predicates
    if n <= 0:
        # negative generations walk toward the ancestors
        depth = -n
        return _ancestors(repo, subset, x, startdepth=depth,
                          stopdepth=depth + 1)
    return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
224 224
def subscriptset(repo, subset, x, y, order):
    # 'x[n]' is only valid after a relation operator ('x#y[n]')
    raise error.ParseError(_("can't use a subscript in this context"))
227 227
def listset(repo, subset, *xs, **opts):
    # a comma-separated list is only valid as function arguments
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
231 231
def keyvaluepair(repo, subset, k, v, order):
    # 'key=value' is only valid inside a function-call argument list
    raise error.ParseError(_("can't use a key-value pair in this context"))
234 234
def func(repo, subset, a, b, order):
    """Dispatch a parsed function call to its registered predicate.

    Predicates flagged with _takeorder receive the ordering requirement
    as an extra argument; others ignore it."""
    f = getsymbol(a)
    if f in symbols:
        fn = symbols[f]
        if getattr(fn, '_takeorder', False):
            return fn(repo, subset, b, order)
        return fn(repo, subset, b)

    # unknown name: suggest only documented (public) predicates
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)
247 247
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = revsetlang.symbols

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator used below to register each predicate into `symbols`
predicate = registrar.revsetpredicate()
264 264
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    args = getargsdict(x, 'limit', 'clean')
    dest = destutil.destupdate(repo, **args)[0]
    return subset & baseset([dest])
270 270
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
278 278
@predicate('adds(pattern)', safe=True, weight=30)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field 1 holds added files
    return checkstatus(repo, subset, pat, 1)
290 290
@predicate('ancestor(*changeset)', safe=True, weight=0.5)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold every revision of every argument into one common ancestor
    for arg in l:
        for r in getset(repo, rl, arg):
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is None or anc.rev() not in subset:
        return baseset()
    return baseset([anc.rev()])
315 315
def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
               stopdepth=None):
    """Shared implementation for ancestors()/_firstancestors(): ancestors
    of x limited to the [startdepth, stopdepth) generation window."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
    return subset & ancs
323 323
@predicate('ancestors(set[, depth])', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'ancestors', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "ancestors" is a keyword
        raise error.ParseError(_('ancestors takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        start = getinteger(args['startdepth'],
                           "ancestors expects an integer startdepth")
        if start < 0:
            raise error.ParseError("negative startdepth")
        startdepth = start
    if 'depth' in args:
        # i18n: "ancestors" is a keyword
        stop = getinteger(args['depth'], _("ancestors expects an integer depth"))
        if stop < 0:
            raise error.ParseError(_("negative depth"))
        # stopdepth is exclusive, so a depth of N maps to N + 1
        stopdepth = stop + 1
    return _ancestors(repo, subset, args['set'],
                      startdepth=startdepth, stopdepth=stopdepth)
352 352
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
358 358
def _childrenspec(repo, subset, x, n, order):
    """Changesets that are the Nth child of a changeset
    in set.

    Walks n steps down the (unique) child chain of each revision in x;
    revisions with no child at some step are dropped, and a revision
    with more than one child at any step is an error since "the Nth
    child" would be ambiguous.
    """
    cs = set()
    for r in getset(repo, fullreposet(repo), x):
        for i in range(n):
            c = repo[r].children()
            if len(c) == 0:
                # chain ended early: for-else below is skipped, r dropped
                break
            if len(c) > 1:
                raise error.RepoLookupError(
                    _("revision in set has more than one child"))
            r = c[0].rev()
        else:
            # only reached when all n steps succeeded
            cs.add(r)
    return subset & cs
376 376
def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    n = getinteger(n, _("~ expects a number"))
    if n < 0:
        # children lookup
        return _childrenspec(repo, subset, x, -n, order)
    ps = set()
    cl = repo.changelog
    for rev in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for _i in range(n):
            try:
                rev = cl.parentrevs(rev)[0]
            except error.WdirUnsupported:
                # the working directory has no changelog entry
                rev = repo[rev].parents()[0].rev()
        ps.add(rev)
    return subset & ps
396 396
@predicate('author(string)', safe=True, weight=10)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = getstring(x, _("author requires a string"))
    # case-insensitive substring match by default
    kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
    return subset.filter(lambda r: matcher(repo[r].user()),
                         condrepr=('<user %r>', n))
406 406
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, status))
423 423
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # plain alias for bisect()
    return bisect(repo, subset, x)
429 429
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if not args:
        # no argument: every bookmarked revision
        bms = {repo[r].rev() for r in repo._bookmarks.values()}
        bms -= {node.nullrev}
        return subset & bms

    bm = getstring(args[0],
                   # i18n: "bookmark" is a keyword
                   _('the argument to bookmark must be a string'))
    kind, pattern, matcher = util.stringmatcher(bm)
    bms = set()
    if kind == 'literal':
        bmrev = repo._bookmarks.get(pattern, None)
        if not bmrev:
            raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                        % pattern)
        bms.add(repo[bmrev].rev())
    else:
        matchrevs = set()
        for name, bmrev in repo._bookmarks.iteritems():
            if matcher(name):
                matchrevs.add(bmrev)
        if not matchrevs:
            raise error.RepoLookupError(_("no bookmarks exist"
                                          " that match '%s'") % pattern)
        for bmrev in matchrevs:
            bms.add(repo[bmrev].rev())
    return subset & bms
464 464
@predicate('branch(string or set)', safe=True, weight=10)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    getbi = repo.revbranchcache().branchinfo
    def getbranch(r):
        try:
            return getbi(r)[0]
        except error.WdirUnsupported:
            # the branch cache can't answer for the working directory
            return repo[r].branch()

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbranch(r)),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                # kind was explicit, so an unknown branch is a hard error
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbranch(r)),
                                 condrepr=('<branch %r>', b))

    # treat x as a revspec: collect branches of those revisions, then
    # select every changeset on any of them (or in the set itself)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbranch(r))
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbranch(r) in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
508 508
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    # deprecated alias kept for backward compatibility
    msg = ("'bumped()' is deprecated, "
           "use 'phasedivergent()'")
    repo.ui.deprecwarn(msg, '4.4')
    return phasedivergent(repo, subset, x)
516 516
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `phasedivergent`.
    (EXPERIMENTAL)
    """
    # i18n: "phasedivergent" is a keyword
    getargs(x, 0, 0, _("phasedivergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'phasedivergent')
528 528
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only a bundlerepo's changelog carries this attribute
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
540 540
def checkstatus(repo, subset, pat, field):
    """Select changesets whose status list `field` (0=modified, 1=added,
    2=removed as indexed into repo.status()) has a file matching pat.

    The matcher is cached in mcache; it is rebuilt per-changeset only for
    fileset patterns ('set:'), which depend on the changectx.
    """
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        # fast path: a single literal filename can be compared directly
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        # only consult the (expensive) status when the filename prefilter hit
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
572 572
def _children(repo, subset, parentset):
    """Return the members of subset having a parent in parentset."""
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    for r in subset:
        # a child always has a higher rev than its parents
        if r <= minrev:
            continue
        p1, p2 = pr(r)
        if p1 in parentset or (p2 != nullrev and p2 in parentset):
            cs.add(r)
    return baseset(cs)
589 589
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
597 597
@predicate('closed()', safe=True, weight=10)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')
606 606
@predicate('contains(pattern)', weight=100)
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return pats in repo[x]
        # real pattern: scan the whole manifest
        c = repo[x]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        return any(m(f) for f in c.manifest())

    return subset.filter(matches, condrepr=('<contains %r>', pat))
633 633
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue,
                         condrepr=('<converted %r>', rev))
656 656
@predicate('date(interval)', safe=True, weight=10)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # date()[0] is the commit timestamp
    return subset.filter(lambda r: dm(repo[r].date()[0]),
                         condrepr=('<date %r>', ds))
666 666
@predicate('desc(string)', safe=True, weight=10)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "desc" is a keyword
    ds = getstring(x, _("desc requires a string"))
    kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
    return subset.filter(lambda r: matcher(repo[r].description()),
                         condrepr=('<desc %r>', ds))
681 681
def _descendants(repo, subset, x, followfirst=False, startdepth=None,
                 stopdepth=None):
    """Shared implementation for descendants()/_firstdescendants():
    descendants of x limited to the [startdepth, stopdepth) window."""
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    descs = dagop.revdescendants(repo, roots, followfirst, startdepth,
                                 stopdepth)
    return subset & descs
689 689
@predicate('descendants(set[, depth])', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set, including the
    given changesets themselves.

    If depth is specified, the result only includes changesets up to
    the specified generation.
    """
    # startdepth is for internal use only until we can decide the UI
    args = getargsdict(x, 'descendants', 'set depth startdepth')
    if 'set' not in args:
        # i18n: "descendants" is a keyword
        raise error.ParseError(_('descendants takes at least 1 argument'))
    startdepth = stopdepth = None
    if 'startdepth' in args:
        start = getinteger(args['startdepth'],
                           "descendants expects an integer startdepth")
        if start < 0:
            raise error.ParseError("negative startdepth")
        startdepth = start
    if 'depth' in args:
        # i18n: "descendants" is a keyword
        stop = getinteger(args['depth'],
                          _("descendants expects an integer depth"))
        if stop < 0:
            raise error.ParseError(_("negative depth"))
        # stopdepth is exclusive, so a depth of N maps to N + 1
        stopdepth = stop + 1
    return _descendants(repo, subset, args['set'],
                        startdepth=startdepth, stopdepth=stopdepth)
718 718
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
724 724
@predicate('destination([set])', safe=True, weight=10)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # step one link back along the recorded source chain
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
769 769
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    # deprecated alias kept for backward compatibility
    msg = ("'divergent()' is deprecated, "
           "use 'contentdivergent()'")
    repo.ui.deprecwarn(msg, '4.4')
    return contentdivergent(repo, subset, x)
777 777
@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final
    successors. (EXPERIMENTAL)
    """
    # i18n: "contentdivergent" is a keyword
    getargs(x, 0, 0, _("contentdivergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'contentdivergent')
788 788
@predicate('extdata(source)', safe=False, weight=100)
def extdata(repo, subset, x):
    """Changesets in the specified extdata source. (EXPERIMENTAL)"""
    # i18n: "extdata" is a keyword
    args = getargsdict(x, 'extdata', 'source')
    source = getstring(args.get('source'),
                       # i18n: "extdata" is a keyword
                       _('extdata takes at least 1 string argument'))
    data = scmutil.extdatasource(repo, source)
    return subset & baseset(data)
799 799
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
808 808
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        if label not in extra:
            return False
        return value is None or matcher(extra[label])

    return subset.filter(_matchvalue,
                         condrepr=('<extra[%r] %r>', label, value))
838 838
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it to exactly one file
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # real pattern: match against the working directory contents
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known maps file node -> changelog rev already resolved for it
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
904 904
@predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
def first(repo, subset, x, order):
    """An alias for limit().
    """
    return limit(repo, subset, x, order)
910 910
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of follow() and _followfirst().

    With no pattern argument, returns the ancestors of the starting
    changeset; with a pattern, returns the file-history revisions of the
    matched files at that changeset.
    """
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            # note: iterate file ancestors, not changeset ancestors
            s = s.union(set(ac.rev() for ac in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
940 940
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    return _follow(repo, subset, x, 'follow')
949 949
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
956 956
@predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
           safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.

    By default, ancestors of 'startrev' are returned. If 'descend' is True,
    descendants of 'startrev' are returned though renames are (currently) not
    followed in this direction.
    """
    args = getargsdict(x, 'followlines', 'file *lines startrev descend')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                # i18n: "followlines" is a keyword
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    # pattern resolution (single exact file) lives in the shared
    # scmutil.parsefollowlinespattern helper
    # i18n: "followlines" is a keyword
    msg = _("followlines expects exactly one file")
    fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
    # i18n: "followlines" is a keyword
    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    fromline, toline = util.processlinerange(fromline, toline)

    fctx = repo[rev].filectx(fname)
    descend = False
    if 'descend' in args:
        descend = getboolean(args['descend'],
                             # i18n: "descend" is a keyword
                             _("descend argument must be a boolean"))
    if descend:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockdescendants(fctx, fromline, toline)),
            iterasc=True)
    else:
        rs = generatorset(
            (c.rev() for c, _linerange
             in dagop.blockancestors(fctx, fromline, toline)),
            iterasc=False)
    return subset & rs
1017 1010
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo)  # drop "null" if any
1025 1018
@predicate('grep(regex)', weight=10)
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        # search changed files, user name and full description
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1046 1039
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1110 1103
@predicate('file(pattern)', safe=True, weight=10)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1123 1116
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    return subset & baseset(hs)
1135 1128
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
1143 1136
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1152 1145
@predicate('keyword(string)', safe=True, weight=10)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1170 1163
@predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
def limit(repo, subset, x, order):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    ls = os.slice(ofs, ofs + lim)
    if order == followorder and lim > 1:
        return subset & ls
    return ls & subset
1192 1185
@predicate('last(set, [n])', safe=True, takeorder=True)
def last(repo, subset, x, order):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    if lim < 0:
        raise error.ParseError(_("negative number to select"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    ls = os.slice(0, lim)
    if order == followorder and lim > 1:
        return subset & ls
    ls.reverse()
    return ls & subset
1212 1205
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    return baseset(datarepr=('<max %r, %r>', subset, os))
1227 1220
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    # a merge has a non-null second parent
    return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1237 1230
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[r - baserev] counts children of r among revs >= baserev
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1257 1250
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    return baseset(datarepr=('<min %r, %r>', subset, os))
1272 1265
@predicate('modifies(pattern)', safe=True, weight=30)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    return checkstatus(repo, subset, pat, 0)
1284 1277
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= {node.nullrev}
    return subset & names
1321 1314
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex nodeid: exact lookup only
        try:
            rn = repo.changelog.rev(node.bin(n))
        except error.WdirUnsupported:
            rn = node.wdirrev
        except (LookupError, TypeError):
            rn = None
    else:
        rn = None
        try:
            pm = repo.changelog._partialmatch(n)
            if pm is not None:
                rn = repo.changelog.rev(pm)
        except error.WdirUnsupported:
            rn = node.wdirrev

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1350 1343
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1358 1351
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every head that is neither in nor descended from 'include'
        descendants = set(dagop.revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
            if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1384 1377
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions.  Omitting the optional set is the
    same as passing all().  If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its origin
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = {_firstsrc(r) for r in dests}
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
1416 1409
@predicate('outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = {cl.rev(r) for r in outgoing.missing}
    return subset & o
1443 1436
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[0])
        except error.WdirUnsupported:
            ps.add(repo[r].parents()[0].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1465 1458
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                # 'parents' holds changectxs; store the integer rev, not the
                # context object (ps must contain revs only)
                ps.add(parents[1].rev())
    ps -= {node.nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1493 1486
def parentpost(repo, subset, x, order):
    # postfix '^' operator: first parent of the operand set
    return p1(repo, subset, x)
1496 1489
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                up(p.rev() for p in repo[r].parents())
    ps -= {node.nullrev}
    return subset & ps
1516 1509
def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    s = repo._phasecache.getrevset(repo, targets)
    return subset & s
1521 1514
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)
1529 1522
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)
1537 1530
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                ps.add(repo[r].parents()[0].rev())
        else:
            try:
                parents = cl.parentrevs(r)
                if parents[1] != node.nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
1570 1563
@predicate('present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        return baseset()
1584 1577
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    return _phase(repo, subset, phases.draft, phases.secret)
1590 1583
# for internal use
@predicate('_phaseandancestors(phasename, set)', safe=True)
def _phaseandancestors(repo, subset, x):
    # equivalent to (phasename() & ancestors(set)) but more efficient
    # phasename could be one of 'draft', 'secret', or '_notpublic'
    args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
    phasename = getsymbol(args[0])
    s = getset(repo, fullreposet(repo), args[1])

    draft = phases.draft
    secret = phases.secret
    phasenamemap = {
        '_notpublic': draft,
        'draft': draft, # follow secret's ancestors
        'secret': secret,
    }
    if phasename not in phasenamemap:
        raise error.ParseError('%r is not a valid phasename' % phasename)

    minimalphase = phasenamemap[phasename]
    getphase = repo._phasecache.phase

    def cutfunc(rev):
        # stop walking once we fall below the minimal phase of interest
        return getphase(repo, rev) < minimalphase

    revs = dagop.revancestors(repo, s, cutfunc=cutfunc)

    if phasename == 'draft': # need to remove secret changesets
        revs = revs.filter(lambda r: getphase(repo, r) == draft)
    return subset & revs
1621 1614
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    condition = lambda r: phase(repo, r) == target
    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)
1632 1625
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1667 1660
@predicate('removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    return checkstatus(repo, subset, pat, 2)
1679 1672
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
        return baseset()
    return subset & baseset([l])
1695 1688
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # reference revisions whose field values candidates are compared against
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is documented as a synonym for 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches if ALL fields equal those of ANY reference rev
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1807 1800
@predicate('reverse(set)', safe=True, takeorder=True, weight=0)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    result = getset(repo, subset, x, order)
    if order != defineorder:
        # the caller imposes the ordering; reversing would violate it
        return result
    result.reverse()
    return result
1816 1809
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no parent (nullrev excluded) inside the candidate set
        return not any(0 <= p and p in candidates for p in parentrevs(r))
    return subset & candidates.filter(isroot, condrepr='<roots>')
1829 1822
# sort-key extractors used by sort(); each maps a changectx to the value
# compared for the corresponding sort key ('author' aliases 'user', and
# 'date' compares only the timestamp component of the (unixtime, tz) pair)
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
    }
1838 1831
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))
    else:
        keys = "rev"

    keyflags = []
    for spec in keys.split():
        descending = spec.startswith('-')
        key = spec[1:] if descending else spec
        if key != 'topo' and key not in _sortkeyfuncs:
            raise error.ParseError(_("unknown sort key %r") % spec)
        keyflags.append((key, descending))

    hastopo = any(key == 'topo' for key, _desc in keyflags)
    if hastopo and len(keyflags) > 1:
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if not hastopo:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))
        opts['topo.firstbranch'] = args['topo.firstbranch']

    return args['set'], keyflags, opts
1875 1868
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
           weight=10)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    # when the caller imposes the ordering (order != defineorder), or there
    # is nothing to sort by, keep the set as evaluated
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: a pure revision-number sort needs no changectx objects
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
                                      firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable: sorting by the least significant
    # key first, then progressively more significant keys, yields the
    # multi-key ordering
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1920 1913
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only revisions touching .hgsubstate can change subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    # PERF: the matcher depends only on 'pat', not on the revision; build it
    # once here instead of rebuilding it for every candidate revision (the
    # previous code called util.stringmatcher(pat) inside submatches()).
    if pat is not None:
        kind, realpat, patmatchfn = util.stringmatcher(pat)

    def submatches(names):
        # yield the subrepo names matching the (precomputed) pattern
        for name in names:
            if patmatchfn(name):
                yield name

    def matches(x):
        c = repo[x]
        # status restricted to .hgsubstate between the rev and its p1
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # any subrepo change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos present in either the rev or its parent
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1964 1957
def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    cl = repo.unfiltered().changelog
    # feed f the nodes for every rev in s; keep only results the unfiltered
    # changelog knows about, then translate back to revision numbers
    mapped = f(cl.node(r) for r in s)
    known = cl.nodemap
    revs = {cl.rev(n) for n in mapped if n in known}
    return smartset.baseset(revs - repo.changelog.filteredrevs)
1978 1971
@predicate('successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given set themselves"""
    base = getset(repo, fullreposet(repo), x)
    allsucc = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
    return subset & _mapbynodefunc(repo, base, allsucc)
1986 1979
def _substringmatcher(pattern, casesensitive=True):
    # start from the generic matcher, then specialize 'literal' patterns to
    # substring containment instead of exact equality
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind != 'literal':
        return kind, pattern, matcher
    if casesensitive:
        return kind, pattern, lambda s: pattern in s
    pattern = encoding.lower(pattern)
    return kind, pattern, lambda s: pattern in encoding.lower(s)
1997 1990
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = {repo[tn].rev()}
        else:
            # non-literal pattern: scan the full tag list with the matcher
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        # no argument: every tagged revision except the implicit 'tip' tag
        s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
    return subset & s
2025 2018
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # alias: delegates the parsed argument tree unchanged to tag()
    return tag(repo, subset, x)
2029 2022
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    # deprecated alias for orphan(); emits a deprecation warning (scheduled
    # per the 4.4 release) and delegates. No docstring — presumably kept out
    # of the generated help on purpose.
    msg = ("'unstable()' is deprecated, "
           "use 'orphan()'")
    repo.ui.deprecwarn(msg, '4.4')

    return orphan(repo, subset, x)
2037 2030
@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
    """
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _("orphan takes no arguments"))
    return subset & obsmod.getrevs(repo, 'orphan')
2046 2039
2047 2040
@predicate('user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # plain alias: delegates to author() (defined elsewhere in this module)
    return author(repo, subset, x)
2056 2049
@predicate('wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # a fullreposet implicitly contains the working-directory revision
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2065 2058
def _orderedlist(repo, subset, x):
    # Evaluate a \0-separated list of revision symbols, preserving input
    # order and keeping only revs present in subset (plus nullrev for a
    # fullreposet).
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # reject tokens like '01' or ' 1' whose canonical spelling
            # differs — they must go through full symbol resolution
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # slow path: resolve the token as a revision symbol
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2093 2086
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order != followorder:
        return _orderedlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedlist(repo, fullreposet(repo), x)
2102 2095
def _orderedintlist(repo, subset, x):
    # evaluate a \0-separated list of integer revisions, input order kept
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    wanted = [int(t) for t in raw.split('\0')]
    return baseset([r for r in wanted if r in subset])
2110 2103
# for internal use
@predicate('_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
2119 2112
def _orderedhexlist(repo, subset, x):
    # evaluate a \0-separated list of hex node ids, input order kept
    raw = getstring(x, "internal error")
    if not raw:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in raw.split('\0')]
    return baseset([r for r in wanted if r in subset])
2128 2121
# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
2137 2130
# Map each parse-tree node label produced by revsetlang to the evaluation
# function (defined above) that computes the corresponding smartset.
methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "andsmally": andsmallyset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "relation": relationset,
    "relsubscript": relsubscriptset,
    "subscript": subscriptset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
2161 2154
def posttreebuilthook(tree, repo):
    """Extension hook called by matchany() on the fully optimized tree.

    The default implementation does nothing; extensions may wrap it to
    inspect or instrument the tree before evaluation.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2165 2158
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec"""
    # single-spec convenience wrapper around matchany()
    return matchany(ui, [spec], repo=repo)
2169 2162
def matchany(ui, specs, repo=None, localalias=None):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If localalias is not None, it is a dict {name: definitionstring}. It takes
    precedence over [revsetalias] config section.
    """
    if not specs:
        # no specs at all: matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        # lets the parser recognize existing revision identifiers
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = revsetlang.parse(specs[0], lookup)
    else:
        # several specs combine as the union of their parse trees
        tree = ('or',
                ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))

    aliases = []
    warn = None
    if ui:
        aliases.extend(ui.configitems('revsetalias'))
        warn = ui.warn
    if localalias:
        # appended last so localalias entries override [revsetalias] ones
        aliases.extend(localalias.items())
    if aliases:
        tree = revsetlang.expandaliases(tree, aliases, warn=warn)
    # transformation pipeline: fold concatenations, annotate ordering
    # requirements, then optimize the tree
    tree = revsetlang.foldconcat(tree)
    tree = revsetlang.analyze(tree)
    tree = revsetlang.optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
2206 2199
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None, order=None):
        if order is None:
            # evaluated alone ('x') the tree defines its own order;
            # with an explicit subset ('subset & x') it must follow it
            order = defineorder if subset is None else followorder
        if subset is None:
            subset = fullreposet(repo)
        return getset(repo, subset, tree, order)
    return mfunc
2219 2212
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    table = registrarobj._table
    for name in table:
        func = table[name]
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
2227 2220
# load built-in predicates explicitly to setup safesymbols
# (ui and extname are unused by loadpredicate, hence None)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
@@ -1,1259 +1,1273
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullid,
23 23 short,
24 24 wdirid,
25 25 wdirrev,
26 26 )
27 27
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 match as matchmod,
32 32 obsolete,
33 33 obsutil,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 revsetlang,
38 38 similar,
39 39 url,
40 40 util,
41 41 vfs,
42 42 )
43 43
44 44 if pycompat.iswindows:
45 45 from . import scmwindows as scmplatform
46 46 else:
47 47 from . import scmposix as scmplatform
48 48
49 49 termsize = scmplatform.termsize
50 50
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    # no per-instance __dict__: the seven lists live in the tuple itself
    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # positional order here fixes the tuple indices read by the
        # properties below
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        # the seven %r slots are filled by unpacking the tuple itself
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
103 103
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Prefer the ctx1 version of a subpath when both revisions have it; the
    # ctx2 entries matter when the .hgsub file has been modified (in ctx2)
    # but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = {p for p in ctx2.substate if p not in ctx1.substate}
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That
    # way, status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
128 128
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        # count non-extinct secret changesets among the excluded nodes
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
145 145
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.

    Handlers below either return an explicit code (InterventionRequired -> 1,
    SystemExit -> its own code) or fall through to the final ``return -1``.
    """
    try:
        try:
            return func()
        except: # re-raises
            # log the traceback (if ui.traceback is enabled) before the
            # outer handlers turn the exception into a message + exit code
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    # NOTE: clause order matters where classes are related by inheritance
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename,
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # user action needed rather than a hard failure
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        # the last word of the message is the missing module name
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # HTTPError-like object
            ui.warn(_("abort: %s\n") % inst)
        elif util.safehasattr(inst, "reason"):
            # URLError-like object
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe (e.g. output piped to a closed pager): stay quiet
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror), inst.filename))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror), inst.filename))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    return -1
256 256
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not acceptable as a bookmark/branch/tag name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(_("%r cannot be used in a name") % forbidden)
    try:
        int(lbl)
    except ValueError:
        # not an integer: acceptable
        return
    raise error.Abort(_("cannot use an integer as a name"))
270 270
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
275 275
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, util.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
287 287
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts on unportable names
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    recognized = warn or abort or lval == 'ignore'
    if bval is None and not recognized:
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
300 300
class casecollisionauditor(object):
    # Detects filenames that collide case-insensitively with tracked files;
    # call the instance with each new filename. Warns or aborts (per the
    # 'abort' flag) on a collision.
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lowercase every tracked filename once up front for O(1) lookups
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # f: candidate filename; warns/aborts if its lowercase form clashes
        # with an already-seen or tracked file of different case
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
324 324
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    digest = hashlib.sha1()
    for rev in revs:
        digest.update('%d;' % rev)
    return digest.digest()
348 348
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only re-raise walk errors that concern the top-level path itself
        if err.filename == path:
            raise err
    # samestat is needed to detect already-visited directories (symlink
    # cycles); if the platform lacks it we cannot follow symlinks safely
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return True only if it was not seen yet
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            # os.walk does not follow symlinks; walk link targets by hand,
            # skipping directories already recorded in seen_dirs
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
396 396
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # the working directory has no real node; substitute the sentinel id
    n = ctx.node()
    return wdirid if n is None else n
403 403
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # the working directory has rev None; map it to the sentinel revision
    r = ctx.rev()
    return wdirrev if r is None else r
411 411
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by cmdutil.changeset_templater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
417 417
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # debug output shows the full hash, normal output the short form
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
425 425
def revsingle(repo, revspec, default='.', localalias=None):
    """Return the changectx for the single revision matching revspec.

    Falls back to repo[default] when revspec is empty (but not 0), and
    aborts when the spec resolves to an empty set.
    """
    if not revspec and revspec != 0:
        return repo[default]
    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
434 434
def _pairspec(revspec):
    """True if revspec parses to a top-level range expression."""
    tree = revsetlang.parse(revspec)
    rangeops = ('range', 'rangepre', 'rangepost', 'rangeall')
    return tree and tree[0] in rangeops
438 438
def revpair(repo, revs):
    """Resolve user-supplied revision specs to a (first, second) pair.

    Returns (p1, None) for empty input. The second element is None when
    the specs name a single revision not written as a range expression.
    Aborts when the specs resolve to an empty set.
    """
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints according to the smartset's known ordering,
    # falling back to insertion order for unordered sets
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # a degenerate pair from multiple specs where one side resolved to
    # nothing is reported explicitly rather than silently collapsed
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
468 468
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are revision numbers; wrap them in a rev() expression
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
496 496
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    # a lone parent immediately preceding this rev carries no information
    if parents[0].rev() < intrev(ctx) - 1:
        return parents
    return []
512 512
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # a glob matching nothing keeps the original pattern
            expanded.append(kindpat)
    return expanded
531 531
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    # a single empty pattern means "no patterns"
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # closes over 'm', which is assigned below before any call can occur
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # an always-matcher needed no patterns; report none back to the caller
    if m.always():
        pats = []
    return m, pats
556 556
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # identical to matchandpats() except the used patterns are discarded
    m, discarded = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
561 561
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
565 565
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
569 569
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises error.ParseError(msg) unless the pattern resolves to exactly
    one file in the context of `rev`.
    """
    # a plain path (no kind prefix such as 'glob:') maps directly to a file
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
583
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # a directory occupying the backup path would shadow the file backup
    if origvfs.isdir(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
605 619
606 620 class _containsnode(object):
607 621 """proxy __contains__(node) to container.__contains__ which accepts revs"""
608 622
609 623 def __init__(self, repo, revcontainer):
610 624 self._torev = repo.changelog.rev
611 625 self._revcontains = revcontainer.__contains__
612 626
613 627 def __contains__(self, node):
614 628 return self._revcontains(self._torev(node))
615 629
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        # explicit entries in 'moves' take precedence over computed ones
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
708 722
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and forget missing ones, recursing into subrepos.

    Returns 1 if anything was rejected or a subrepo reported a change,
    0 otherwise. With dry_run, nothing is recorded in the dirstate.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # collect files the matcher flags as bad while still walking them
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # an explicitly-named file that was rejected makes the whole run fail
    for f in rejected:
        if f in m.files():
            return 1
    return ret
764 778
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # 'rejected' is referenced by the badfn closure before it is assigned;
    # that is safe because the matcher cannot call badfn until it is walked
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # any rejected file that was explicitly requested is a failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
793 807
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of
    repo-relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # classify each walked file by its dirstate state character combined
    # with whether it still exists on disk (st is its stat result)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and passing the path audit
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed yet present on disk
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
822 836
823 837 def _findrenames(repo, matcher, added, removed, similarity):
824 838 '''Find renames from removed files to added ones.'''
825 839 renames = {}
826 840 if similarity > 0:
827 841 for old, new, score in similar.findrenames(repo, added, removed,
828 842 similarity):
829 843 if (repo.ui.verbose or not matcher.exact(old)
830 844 or not matcher.exact(new)):
831 845 repo.ui.status(_('recording removal of %s as rename to %s '
832 846 '(%d%% similar)\n') %
833 847 (matcher.rel(old), matcher.rel(new),
834 848 score * 100))
835 849 renames[new] = old
836 850 return renames
837 851
838 852 def _markchanges(repo, unknown, deleted, renames):
839 853 '''Marks the files in unknown as added, the files in deleted as removed,
840 854 and the files in renames as copied.'''
841 855 wctx = repo[None]
842 856 with repo.wlock():
843 857 wctx.forget(deleted)
844 858 wctx.add(unknown)
845 859 for new, old in renames.iteritems():
846 860 wctx.copy(old, new)
847 861
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain: if src is itself a copy, credit the original
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # 'm'/'n' are merged/normal dirstate states; anything else needs a
        # normallookup so the next status check re-examines the file
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # copying an added (never committed) file records no copy data
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
866 880
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty or non-alphanumeric-leading entry means a corrupt file
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
885 899
def writerequires(opener, requirements):
    """Write the sorted requirements, one per line, to .hg/requires."""
    lines = ['%s\n' % r for r in sorted(requirements)]
    with opener('requires', 'w') as fp:
        for line in lines:
            fp.write(line)
890 904
class filecachesubentry(object):
    """Stat-based change tracking for a single file path.

    cacheable() reports whether the filesystem can reliably tell us when
    the file is replaced; when it cannot, changed() always returns True.
    """
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so future changed() calls compare against current state
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None (implicitly) for missing files; other errors propagate
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
945 959
class filecacheentry(object):
    """A group of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        # short-circuits like the underlying sub-entries expect
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
962 976
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: capture the wrapped function and its name,
        # then return self so the descriptor replaces the function
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # (a value stored directly in obj.__dict__ short-circuits the stat)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1041 1055
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=util.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # record format: '<revspec>[ <value>]'
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[repo[k].rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            # drain and reap the child even when parsing failed
            proc.communicate()
            if proc.returncode != 0:
                # not an error so 'cmd | grep' can be empty
                repo.ui.debug("extdata command '%s' %s\n"
                              % (cmd, util.explainexit(proc.returncode)[0]))
        if src:
            src.close()

    return data
1097 1111
1098 1112 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1099 1113 if lock is None:
1100 1114 raise error.LockInheritanceContractViolation(
1101 1115 'lock can only be inherited while held')
1102 1116 if environ is None:
1103 1117 environ = {}
1104 1118 with lock.inherit() as locker:
1105 1119 environ[envvar] = locker
1106 1120 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1107 1121
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1116 1130
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1123 1137
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    usegd = ui.configbool('format', 'generaldelta')
    return usegd
1129 1143
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted but never used here — confirm
        # whether any caller relies on it before removing the parameter
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            # the reserved first-line key must never appear as a real key
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        # validate every pair before writing so a bad entry leaves the
        # existing file untouched
        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1198 1212
# transaction name prefixes after which the number of changesets turned
# obsolete should be summarized to the user (see registersummarycallback)
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction name prefixes after which the range of newly added
# changesets should be summarized to the user
_reportnewcssource = [
    'pull',
    'unbundle',
]
1211 1225
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    'otr' is the transaction the reports are attached to (via postclose
    callbacks); 'txnname' selects which reports apply, matched on the
    transaction name prefix.
    """
    def txmatch(sources):
        # reports are keyed on the start of the transaction name
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # hold the repo through a weakref so the postclose callback does
        # not create a reference cycle keeping the repo alive
        reporef = weakref.ref(repo)
        def wrapped(tr):
            repo = reporef()
            if repo is None:
                # the repository was garbage collected before the
                # transaction closed; summaries are best-effort, so
                # silently skip reporting instead of crashing
                return
            func(repo, tr)
        newcat = '%2i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            """Report how many changesets were obsoleted by the transaction."""
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = list(tr.changes.get('revs', set()))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
General Comments 0
You need to be logged in to leave comments. Login now