match: resolve filesets against the passed `cwd`, not the current one...
Matt Harbison
r44461:e685fac5 default
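This changeset threads a caller-supplied `cwd` through `basectx.matchfileset()` and `basectx.match()`, so relative patterns in a fileset are resolved against a directory the caller chooses rather than the process's current working directory. A minimal sketch of the new calling convention (illustrative only; `repo` stands for any localrepository object and the fileset expression is just an example):

    ctx = repo[b'.']
    # Before: the fileset was always resolved against the process's cwd.
    #   m = ctx.matchfileset(b"size('<5M')")
    # After: the caller picks the anchor directory, e.g. the repo root.
    m = ctx.matchfileset(repo.root, b"size('<5M')")
    if m(b'mercurial/context.py'):
        pass  # the file matches the fileset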
@@ -1,109 +1,109 @@
1 1 # highlight - syntax highlighting in hgweb, based on Pygments
2 2 #
3 3 # Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # The original module was split into an interface and an implementation
9 9 # file to defer pygments loading and speed up extension setup.
10 10
11 11 """syntax highlighting for hgweb (requires Pygments)
12 12
13 13 It depends on the Pygments syntax highlighting library:
14 14 http://pygments.org/
15 15
16 16 The following configuration options are available::
17 17
18 18 [web]
19 19 pygments_style = <style> (default: colorful)
20 20 highlightfiles = <fileset> (default: size('<5M'))
21 21 highlightonlymatchfilename = <bool> (default: False)
22 22
23 23 ``highlightonlymatchfilename`` will only highlight files if their type could
24 24 be identified by their filename. When this is not enabled (the default),
25 25 Pygments will try very hard to identify the file type from content and any
26 26 match (even matches with a low confidence score) will be used.
27 27 """
28 28
29 29 from __future__ import absolute_import
30 30
31 31 from . import highlight
32 32 from mercurial.hgweb import (
33 33 webcommands,
34 34 webutil,
35 35 )
36 36
37 37 from mercurial import (
38 38 extensions,
39 39 pycompat,
40 40 )
41 41
42 42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 44 # specify the version(s) of Mercurial they are tested with, or
45 45 # leave the attribute unspecified.
46 46 testedwith = b'ships-with-hg-core'
47 47
48 48
49 49 def pygmentize(web, field, fctx, tmpl):
50 50 style = web.config(b'web', b'pygments_style', b'colorful')
51 51 expr = web.config(b'web', b'highlightfiles', b"size('<5M')")
52 52 filenameonly = web.configbool(b'web', b'highlightonlymatchfilename', False)
53 53
54 54 ctx = fctx.changectx()
55 m = ctx.matchfileset(expr)
55 m = ctx.matchfileset(fctx.repo().root, expr)
56 56 if m(fctx.path()):
57 57 highlight.pygmentize(
58 58 field, fctx, style, tmpl, guessfilenameonly=filenameonly
59 59 )
60 60
61 61
62 62 def filerevision_highlight(orig, web, fctx):
63 63 mt = web.res.headers[b'Content-Type']
64 64 # only pygmentize for mimetype containing 'html' so we both match
65 65 # 'text/html' and possibly 'application/xhtml+xml' in the future
66 66 # so that we don't have to touch the extension when the mimetype
67 67 # for a template changes; also hgweb optimizes the case that a
68 68 # raw file is sent using rawfile() and doesn't call us, so we
69 69 # can't clash with the file's content-type here in case we
70 70 # pygmentize an HTML file
71 71 if b'html' in mt:
72 72 pygmentize(web, b'fileline', fctx, web.tmpl)
73 73
74 74 return orig(web, fctx)
75 75
76 76
77 77 def annotate_highlight(orig, web):
78 78 mt = web.res.headers[b'Content-Type']
79 79 if b'html' in mt:
80 80 fctx = webutil.filectx(web.repo, web.req)
81 81 pygmentize(web, b'annotateline', fctx, web.tmpl)
82 82
83 83 return orig(web)
84 84
85 85
86 86 def generate_css(web):
87 87 pg_style = web.config(b'web', b'pygments_style', b'colorful')
88 88 fmter = highlight.HtmlFormatter(style=pycompat.sysstr(pg_style))
89 89 web.res.headers[b'Content-Type'] = b'text/css'
90 90 style_defs = fmter.get_style_defs(pycompat.sysstr(b''))
91 91 web.res.setbodybytes(
92 92 b''.join(
93 93 [
94 94 b'/* pygments_style = %s */\n\n' % pg_style,
95 95 pycompat.bytestr(style_defs),
96 96 ]
97 97 )
98 98 )
99 99 return web.res.sendresponse()
100 100
101 101
102 102 def extsetup(ui):
103 103 # monkeypatch in the new version
104 104 extensions.wrapfunction(
105 105 webcommands, b'_filerevision', filerevision_highlight
106 106 )
107 107 extensions.wrapfunction(webcommands, b'annotate', annotate_highlight)
108 108 webcommands.highlightcss = generate_css
109 109 webcommands.__all__.append(b'highlightcss')
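With the new signature, the hunk above anchors the configured `highlightfiles` fileset to the repository root (via `fctx.repo().root`), so highlighting decisions no longer depend on the directory `hg serve` was started from. For reference, the `[web]` options documented in the module docstring correspond to an hgrc section like the following (a sketch; the values shown are the documented defaults):

    [web]
    pygments_style = colorful
    highlightfiles = size('<5M')
    highlightonlymatchfilename = False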
@@ -1,3021 +1,3027 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from .pycompat import (
27 27 getattr,
28 28 open,
29 29 )
30 30 from . import (
31 31 copies,
32 32 dagop,
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 obsolete as obsmod,
38 38 patch,
39 39 pathutil,
40 40 phases,
41 41 pycompat,
42 42 repoview,
43 43 scmutil,
44 44 sparse,
45 45 subrepo,
46 46 subrepoutil,
47 47 util,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 stringutil,
52 52 )
53 53
54 54 propertycache = util.propertycache
55 55
56 56
57 57 class basectx(object):
58 58 """A basectx object represents the common logic for its children:
59 59 changectx: read-only context that is already present in the repo,
60 60 workingctx: a context that represents the working directory and can
61 61 be committed,
62 62 memctx: a context that represents changes in-memory and can also
63 63 be committed."""
64 64
65 65 def __init__(self, repo):
66 66 self._repo = repo
67 67
68 68 def __bytes__(self):
69 69 return short(self.node())
70 70
71 71 __str__ = encoding.strmethod(__bytes__)
72 72
73 73 def __repr__(self):
74 74 return "<%s %s>" % (type(self).__name__, str(self))
75 75
76 76 def __eq__(self, other):
77 77 try:
78 78 return type(self) == type(other) and self._rev == other._rev
79 79 except AttributeError:
80 80 return False
81 81
82 82 def __ne__(self, other):
83 83 return not (self == other)
84 84
85 85 def __contains__(self, key):
86 86 return key in self._manifest
87 87
88 88 def __getitem__(self, key):
89 89 return self.filectx(key)
90 90
91 91 def __iter__(self):
92 92 return iter(self._manifest)
93 93
94 94 def _buildstatusmanifest(self, status):
95 95 """Builds a manifest that includes the given status results, if this is
96 96 a working copy context. For non-working copy contexts, it just returns
97 97 the normal manifest."""
98 98 return self.manifest()
99 99
100 100 def _matchstatus(self, other, match):
101 101 """This internal method provides a way for child objects to override the
102 102 match operator.
103 103 """
104 104 return match
105 105
106 106 def _buildstatus(
107 107 self, other, s, match, listignored, listclean, listunknown
108 108 ):
109 109 """build a status with respect to another context"""
110 110 # Load earliest manifest first for caching reasons. More specifically,
111 111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 114 # delta to what's in the cache. So that's one full reconstruction + one
115 115 # delta application.
116 116 mf2 = None
117 117 if self.rev() is not None and self.rev() < other.rev():
118 118 mf2 = self._buildstatusmanifest(s)
119 119 mf1 = other._buildstatusmanifest(s)
120 120 if mf2 is None:
121 121 mf2 = self._buildstatusmanifest(s)
122 122
123 123 modified, added = [], []
124 124 removed = []
125 125 clean = []
126 126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 127 deletedset = set(deleted)
128 128 d = mf1.diff(mf2, match=match, clean=listclean)
129 129 for fn, value in pycompat.iteritems(d):
130 130 if fn in deletedset:
131 131 continue
132 132 if value is None:
133 133 clean.append(fn)
134 134 continue
135 135 (node1, flag1), (node2, flag2) = value
136 136 if node1 is None:
137 137 added.append(fn)
138 138 elif node2 is None:
139 139 removed.append(fn)
140 140 elif flag1 != flag2:
141 141 modified.append(fn)
142 142 elif node2 not in wdirfilenodeids:
143 143 # When comparing files between two commits, we save time by
144 144 # not comparing the file contents when the nodeids differ.
145 145 # Note that this means we incorrectly report a reverted change
146 146 # to a file as a modification.
147 147 modified.append(fn)
148 148 elif self[fn].cmp(other[fn]):
149 149 modified.append(fn)
150 150 else:
151 151 clean.append(fn)
152 152
153 153 if removed:
154 154 # need to filter files if they are already reported as removed
155 155 unknown = [
156 156 fn
157 157 for fn in unknown
158 158 if fn not in mf1 and (not match or match(fn))
159 159 ]
160 160 ignored = [
161 161 fn
162 162 for fn in ignored
163 163 if fn not in mf1 and (not match or match(fn))
164 164 ]
165 165 # if they're deleted, don't report them as removed
166 166 removed = [fn for fn in removed if fn not in deletedset]
167 167
168 168 return scmutil.status(
169 169 modified, added, removed, deleted, unknown, ignored, clean
170 170 )
171 171
172 172 @propertycache
173 173 def substate(self):
174 174 return subrepoutil.state(self, self._repo.ui)
175 175
176 176 def subrev(self, subpath):
177 177 return self.substate[subpath][1]
178 178
179 179 def rev(self):
180 180 return self._rev
181 181
182 182 def node(self):
183 183 return self._node
184 184
185 185 def hex(self):
186 186 return hex(self.node())
187 187
188 188 def manifest(self):
189 189 return self._manifest
190 190
191 191 def manifestctx(self):
192 192 return self._manifestctx
193 193
194 194 def repo(self):
195 195 return self._repo
196 196
197 197 def phasestr(self):
198 198 return phases.phasenames[self.phase()]
199 199
200 200 def mutable(self):
201 201 return self.phase() > phases.public
202 202
203 def matchfileset(self, expr, badfn=None):
204 return fileset.match(self, expr, badfn=badfn)
203 def matchfileset(self, cwd, expr, badfn=None):
204 return fileset.match(self, cwd, expr, badfn=badfn)
205 205
206 206 def obsolete(self):
207 207 """True if the changeset is obsolete"""
208 208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 209
210 210 def extinct(self):
211 211 """True if the changeset is extinct"""
212 212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 213
214 214 def orphan(self):
215 215 """True if the changeset is not obsolete, but its ancestor is"""
216 216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 217
218 218 def phasedivergent(self):
219 219 """True if the changeset tries to be a successor of a public changeset
220 220
221 221 Only non-public and non-obsolete changesets may be phase-divergent.
222 222 """
223 223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 224
225 225 def contentdivergent(self):
226 226 """Is a successor of a changeset with multiple possible successor sets
227 227
228 228 Only non-public and non-obsolete changesets may be content-divergent.
229 229 """
230 230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 231
232 232 def isunstable(self):
233 233 """True if the changeset is either orphan, phase-divergent or
234 234 content-divergent"""
235 235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 236
237 237 def instabilities(self):
238 238 """return the list of instabilities affecting this changeset.
239 239
240 240 Instabilities are returned as strings. possible values are:
241 241 - orphan,
242 242 - phase-divergent,
243 243 - content-divergent.
244 244 """
245 245 instabilities = []
246 246 if self.orphan():
247 247 instabilities.append(b'orphan')
248 248 if self.phasedivergent():
249 249 instabilities.append(b'phase-divergent')
250 250 if self.contentdivergent():
251 251 instabilities.append(b'content-divergent')
252 252 return instabilities
253 253
254 254 def parents(self):
255 255 """return contexts for each parent changeset"""
256 256 return self._parents
257 257
258 258 def p1(self):
259 259 return self._parents[0]
260 260
261 261 def p2(self):
262 262 parents = self._parents
263 263 if len(parents) == 2:
264 264 return parents[1]
265 265 return self._repo[nullrev]
266 266
267 267 def _fileinfo(self, path):
268 268 if '_manifest' in self.__dict__:
269 269 try:
270 270 return self._manifest[path], self._manifest.flags(path)
271 271 except KeyError:
272 272 raise error.ManifestLookupError(
273 273 self._node, path, _(b'not found in manifest')
274 274 )
275 275 if '_manifestdelta' in self.__dict__ or path in self.files():
276 276 if path in self._manifestdelta:
277 277 return (
278 278 self._manifestdelta[path],
279 279 self._manifestdelta.flags(path),
280 280 )
281 281 mfl = self._repo.manifestlog
282 282 try:
283 283 node, flag = mfl[self._changeset.manifest].find(path)
284 284 except KeyError:
285 285 raise error.ManifestLookupError(
286 286 self._node, path, _(b'not found in manifest')
287 287 )
288 288
289 289 return node, flag
290 290
291 291 def filenode(self, path):
292 292 return self._fileinfo(path)[0]
293 293
294 294 def flags(self, path):
295 295 try:
296 296 return self._fileinfo(path)[1]
297 297 except error.LookupError:
298 298 return b''
299 299
300 300 @propertycache
301 301 def _copies(self):
302 302 return copies.computechangesetcopies(self)
303 303
304 304 def p1copies(self):
305 305 return self._copies[0]
306 306
307 307 def p2copies(self):
308 308 return self._copies[1]
309 309
310 310 def sub(self, path, allowcreate=True):
311 311 '''return a subrepo for the stored revision of path, never wdir()'''
312 312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313 313
314 314 def nullsub(self, path, pctx):
315 315 return subrepo.nullsubrepo(self, path, pctx)
316 316
317 317 def workingsub(self, path):
318 318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 319 context.
320 320 '''
321 321 return subrepo.subrepo(self, path, allowwdir=True)
322 322
323 323 def match(
324 324 self,
325 325 pats=None,
326 326 include=None,
327 327 exclude=None,
328 328 default=b'glob',
329 329 listsubrepos=False,
330 330 badfn=None,
331 cwd=None,
331 332 ):
332 333 r = self._repo
334 if not cwd:
335 cwd = r.getcwd()
333 336 return matchmod.match(
334 337 r.root,
335 r.getcwd(),
338 cwd,
336 339 pats,
337 340 include,
338 341 exclude,
339 342 default,
340 343 auditor=r.nofsauditor,
341 344 ctx=self,
342 345 listsubrepos=listsubrepos,
343 346 badfn=badfn,
344 347 )
345 348
346 349 def diff(
347 350 self,
348 351 ctx2=None,
349 352 match=None,
350 353 changes=None,
351 354 opts=None,
352 355 losedatafn=None,
353 356 pathfn=None,
354 357 copy=None,
355 358 copysourcematch=None,
356 359 hunksfilterfn=None,
357 360 ):
358 361 """Returns a diff generator for the given contexts and matcher"""
359 362 if ctx2 is None:
360 363 ctx2 = self.p1()
361 364 if ctx2 is not None:
362 365 ctx2 = self._repo[ctx2]
363 366 return patch.diff(
364 367 self._repo,
365 368 ctx2,
366 369 self,
367 370 match=match,
368 371 changes=changes,
369 372 opts=opts,
370 373 losedatafn=losedatafn,
371 374 pathfn=pathfn,
372 375 copy=copy,
373 376 copysourcematch=copysourcematch,
374 377 hunksfilterfn=hunksfilterfn,
375 378 )
376 379
377 380 def dirs(self):
378 381 return self._manifest.dirs()
379 382
380 383 def hasdir(self, dir):
381 384 return self._manifest.hasdir(dir)
382 385
383 386 def status(
384 387 self,
385 388 other=None,
386 389 match=None,
387 390 listignored=False,
388 391 listclean=False,
389 392 listunknown=False,
390 393 listsubrepos=False,
391 394 ):
392 395 """return status of files between two nodes or node and working
393 396 directory.
394 397
395 398 If other is None, compare this node with working directory.
396 399
397 400 returns (modified, added, removed, deleted, unknown, ignored, clean)
398 401 """
399 402
400 403 ctx1 = self
401 404 ctx2 = self._repo[other]
402 405
403 406 # This next code block is, admittedly, fragile logic that tests for
404 407 # reversing the contexts and wouldn't need to exist if it weren't for
405 408 # the fast (and common) code path of comparing the working directory
406 409 # with its first parent.
407 410 #
408 411 # What we're aiming for here is the ability to call:
409 412 #
410 413 # workingctx.status(parentctx)
411 414 #
412 415 # If we always built the manifest for each context and compared those,
413 416 # then we'd be done. But the special case of the above call means we
414 417 # just copy the manifest of the parent.
415 418 reversed = False
416 419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
417 420 reversed = True
418 421 ctx1, ctx2 = ctx2, ctx1
419 422
420 423 match = self._repo.narrowmatch(match)
421 424 match = ctx2._matchstatus(ctx1, match)
422 425 r = scmutil.status([], [], [], [], [], [], [])
423 426 r = ctx2._buildstatus(
424 427 ctx1, r, match, listignored, listclean, listunknown
425 428 )
426 429
427 430 if reversed:
428 431 # Reverse added and removed. Clear deleted, unknown and ignored as
429 432 # these make no sense to reverse.
430 433 r = scmutil.status(
431 434 r.modified, r.removed, r.added, [], [], [], r.clean
432 435 )
433 436
434 437 if listsubrepos:
435 438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
436 439 try:
437 440 rev2 = ctx2.subrev(subpath)
438 441 except KeyError:
439 442 # A subrepo that existed in node1 was deleted between
440 443 # node1 and node2 (inclusive). Thus, ctx2's substate
441 444 # won't contain that subpath. The best we can do is ignore it.
442 445 rev2 = None
443 446 submatch = matchmod.subdirmatcher(subpath, match)
444 447 s = sub.status(
445 448 rev2,
446 449 match=submatch,
447 450 ignored=listignored,
448 451 clean=listclean,
449 452 unknown=listunknown,
450 453 listsubrepos=True,
451 454 )
452 455 for k in (
453 456 'modified',
454 457 'added',
455 458 'removed',
456 459 'deleted',
457 460 'unknown',
458 461 'ignored',
459 462 'clean',
460 463 ):
461 464 rfiles, sfiles = getattr(r, k), getattr(s, k)
462 465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
463 466
464 467 r.modified.sort()
465 468 r.added.sort()
466 469 r.removed.sort()
467 470 r.deleted.sort()
468 471 r.unknown.sort()
469 472 r.ignored.sort()
470 473 r.clean.sort()
471 474
472 475 return r
473 476
474 477
475 478 class changectx(basectx):
476 479 """A changecontext object makes access to data related to a particular
477 480 changeset convenient. It represents a read-only context already present in
478 481 the repo."""
479 482
480 483 def __init__(self, repo, rev, node, maybe_filtered=True):
481 484 super(changectx, self).__init__(repo)
482 485 self._rev = rev
483 486 self._node = node
484 487 # When maybe_filtered is True, the revision might be affected by
485 488 # changelog filtering and operation through the filtered changelog must be used.
486 489 #
487 490 # When maybe_filtered is False, the revision has already been checked
488 491 # against filtering and is not filtered. Operation through the
489 492 # unfiltered changelog might be used in some cases.
490 493 self._maybe_filtered = maybe_filtered
491 494
492 495 def __hash__(self):
493 496 try:
494 497 return hash(self._rev)
495 498 except AttributeError:
496 499 return id(self)
497 500
498 501 def __nonzero__(self):
499 502 return self._rev != nullrev
500 503
501 504 __bool__ = __nonzero__
502 505
503 506 @propertycache
504 507 def _changeset(self):
505 508 if self._maybe_filtered:
506 509 repo = self._repo
507 510 else:
508 511 repo = self._repo.unfiltered()
509 512 return repo.changelog.changelogrevision(self.rev())
510 513
511 514 @propertycache
512 515 def _manifest(self):
513 516 return self._manifestctx.read()
514 517
515 518 @property
516 519 def _manifestctx(self):
517 520 return self._repo.manifestlog[self._changeset.manifest]
518 521
519 522 @propertycache
520 523 def _manifestdelta(self):
521 524 return self._manifestctx.readdelta()
522 525
523 526 @propertycache
524 527 def _parents(self):
525 528 repo = self._repo
526 529 if self._maybe_filtered:
527 530 cl = repo.changelog
528 531 else:
529 532 cl = repo.unfiltered().changelog
530 533
531 534 p1, p2 = cl.parentrevs(self._rev)
532 535 if p2 == nullrev:
533 536 return [repo[p1]]
534 537 return [repo[p1], repo[p2]]
535 538
536 539 def changeset(self):
537 540 c = self._changeset
538 541 return (
539 542 c.manifest,
540 543 c.user,
541 544 c.date,
542 545 c.files,
543 546 c.description,
544 547 c.extra,
545 548 )
546 549
547 550 def manifestnode(self):
548 551 return self._changeset.manifest
549 552
550 553 def user(self):
551 554 return self._changeset.user
552 555
553 556 def date(self):
554 557 return self._changeset.date
555 558
556 559 def files(self):
557 560 return self._changeset.files
558 561
559 562 def filesmodified(self):
560 563 modified = set(self.files())
561 564 modified.difference_update(self.filesadded())
562 565 modified.difference_update(self.filesremoved())
563 566 return sorted(modified)
564 567
565 568 def filesadded(self):
566 569 filesadded = self._changeset.filesadded
567 570 compute_on_none = True
568 571 if self._repo.filecopiesmode == b'changeset-sidedata':
569 572 compute_on_none = False
570 573 else:
571 574 source = self._repo.ui.config(b'experimental', b'copies.read-from')
572 575 if source == b'changeset-only':
573 576 compute_on_none = False
574 577 elif source != b'compatibility':
575 578 # filelog mode, ignore any changelog content
576 579 filesadded = None
577 580 if filesadded is None:
578 581 if compute_on_none:
579 582 filesadded = copies.computechangesetfilesadded(self)
580 583 else:
581 584 filesadded = []
582 585 return filesadded
583 586
584 587 def filesremoved(self):
585 588 filesremoved = self._changeset.filesremoved
586 589 compute_on_none = True
587 590 if self._repo.filecopiesmode == b'changeset-sidedata':
588 591 compute_on_none = False
589 592 else:
590 593 source = self._repo.ui.config(b'experimental', b'copies.read-from')
591 594 if source == b'changeset-only':
592 595 compute_on_none = False
593 596 elif source != b'compatibility':
594 597 # filelog mode, ignore any changelog content
595 598 filesremoved = None
596 599 if filesremoved is None:
597 600 if compute_on_none:
598 601 filesremoved = copies.computechangesetfilesremoved(self)
599 602 else:
600 603 filesremoved = []
601 604 return filesremoved
602 605
603 606 @propertycache
604 607 def _copies(self):
605 608 p1copies = self._changeset.p1copies
606 609 p2copies = self._changeset.p2copies
607 610 compute_on_none = True
608 611 if self._repo.filecopiesmode == b'changeset-sidedata':
609 612 compute_on_none = False
610 613 else:
611 614 source = self._repo.ui.config(b'experimental', b'copies.read-from')
612 615 # If config says to get copy metadata only from changeset, then
613 616 # return that, defaulting to {} if there was no copy metadata. In
614 617 # compatibility mode, we return copy data from the changeset if it
615 618 # was recorded there, and otherwise we fall back to getting it from
616 619 # the filelogs (below).
617 620 #
618 621 # If we are in compatibility mode and there is no data in the
619 622 # changeset, we get the copy metadata from the filelogs.
620 623 #
621 624 # Otherwise, when the config says to read only from the filelog,
622 625 # we get the copy metadata from the filelogs.
623 626 if source == b'changeset-only':
624 627 compute_on_none = False
625 628 elif source != b'compatibility':
626 629 # filelog mode, ignore any changelog content
627 630 p1copies = p2copies = None
628 631 if p1copies is None:
629 632 if compute_on_none:
630 633 p1copies, p2copies = super(changectx, self)._copies
631 634 else:
632 635 if p1copies is None:
633 636 p1copies = {}
634 637 if p2copies is None:
635 638 p2copies = {}
636 639 return p1copies, p2copies
637 640
638 641 def description(self):
639 642 return self._changeset.description
640 643
641 644 def branch(self):
642 645 return encoding.tolocal(self._changeset.extra.get(b"branch"))
643 646
644 647 def closesbranch(self):
645 648 return b'close' in self._changeset.extra
646 649
647 650 def extra(self):
648 651 """Return a dict of extra information."""
649 652 return self._changeset.extra
650 653
651 654 def tags(self):
652 655 """Return a list of byte tag names"""
653 656 return self._repo.nodetags(self._node)
654 657
655 658 def bookmarks(self):
656 659 """Return a list of byte bookmark names."""
657 660 return self._repo.nodebookmarks(self._node)
658 661
659 662 def phase(self):
660 663 return self._repo._phasecache.phase(self._repo, self._rev)
661 664
662 665 def hidden(self):
663 666 return self._rev in repoview.filterrevs(self._repo, b'visible')
664 667
665 668 def isinmemory(self):
666 669 return False
667 670
668 671 def children(self):
669 672 """return list of changectx contexts for each child changeset.
670 673
671 674 This returns only the immediate child changesets. Use descendants() to
672 675 recursively walk children.
673 676 """
674 677 c = self._repo.changelog.children(self._node)
675 678 return [self._repo[x] for x in c]
676 679
677 680 def ancestors(self):
678 681 for a in self._repo.changelog.ancestors([self._rev]):
679 682 yield self._repo[a]
680 683
681 684 def descendants(self):
682 685 """Recursively yield all children of the changeset.
683 686
684 687 For just the immediate children, use children()
685 688 """
686 689 for d in self._repo.changelog.descendants([self._rev]):
687 690 yield self._repo[d]
688 691
689 692 def filectx(self, path, fileid=None, filelog=None):
690 693 """get a file context from this changeset"""
691 694 if fileid is None:
692 695 fileid = self.filenode(path)
693 696 return filectx(
694 697 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
695 698 )
696 699
697 700 def ancestor(self, c2, warn=False):
698 701 """return the "best" ancestor context of self and c2
699 702
700 703 If there are multiple candidates, it will show a message and check
701 704 merge.preferancestor configuration before falling back to the
702 705 revlog ancestor."""
703 706 # deal with workingctxs
704 707 n2 = c2._node
705 708 if n2 is None:
706 709 n2 = c2._parents[0]._node
707 710 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
708 711 if not cahs:
709 712 anc = nullid
710 713 elif len(cahs) == 1:
711 714 anc = cahs[0]
712 715 else:
713 716 # experimental config: merge.preferancestor
714 717 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
715 718 try:
716 719 ctx = scmutil.revsymbol(self._repo, r)
717 720 except error.RepoLookupError:
718 721 continue
719 722 anc = ctx.node()
720 723 if anc in cahs:
721 724 break
722 725 else:
723 726 anc = self._repo.changelog.ancestor(self._node, n2)
724 727 if warn:
725 728 self._repo.ui.status(
726 729 (
727 730 _(b"note: using %s as ancestor of %s and %s\n")
728 731 % (short(anc), short(self._node), short(n2))
729 732 )
730 733 + b''.join(
731 734 _(
732 735 b" alternatively, use --config "
733 736 b"merge.preferancestor=%s\n"
734 737 )
735 738 % short(n)
736 739 for n in sorted(cahs)
737 740 if n != anc
738 741 )
739 742 )
740 743 return self._repo[anc]
741 744
742 745 def isancestorof(self, other):
743 746 """True if this changeset is an ancestor of other"""
744 747 return self._repo.changelog.isancestorrev(self._rev, other._rev)
745 748
746 749 def walk(self, match):
747 750 '''Generates matching file names.'''
748 751
749 752 # Wrap match.bad method to have message with nodeid
750 753 def bad(fn, msg):
751 754 # The manifest doesn't know about subrepos, so don't complain about
752 755 # paths into valid subrepos.
753 756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
754 757 return
755 758 match.bad(fn, _(b'no such file in rev %s') % self)
756 759
757 760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
758 761 return self._manifest.walk(m)
759 762
760 763 def matches(self, match):
761 764 return self.walk(match)
762 765
763 766
764 767 class basefilectx(object):
765 768 """A filecontext object represents the common logic for its children:
766 769 filectx: read-only access to a filerevision that is already present
767 770 in the repo,
768 771 workingfilectx: a filecontext that represents files from the working
769 772 directory,
770 773 memfilectx: a filecontext that represents files in-memory,
771 774 """
772 775
773 776 @propertycache
774 777 def _filelog(self):
775 778 return self._repo.file(self._path)
776 779
777 780 @propertycache
778 781 def _changeid(self):
779 782 if '_changectx' in self.__dict__:
780 783 return self._changectx.rev()
781 784 elif '_descendantrev' in self.__dict__:
782 785 # this file context was created from a revision with a known
783 786 # descendant, we can (lazily) correct for linkrev aliases
784 787 return self._adjustlinkrev(self._descendantrev)
785 788 else:
786 789 return self._filelog.linkrev(self._filerev)
787 790
788 791 @propertycache
789 792 def _filenode(self):
790 793 if '_fileid' in self.__dict__:
791 794 return self._filelog.lookup(self._fileid)
792 795 else:
793 796 return self._changectx.filenode(self._path)
794 797
795 798 @propertycache
796 799 def _filerev(self):
797 800 return self._filelog.rev(self._filenode)
798 801
799 802 @propertycache
800 803 def _repopath(self):
801 804 return self._path
802 805
803 806 def __nonzero__(self):
804 807 try:
805 808 self._filenode
806 809 return True
807 810 except error.LookupError:
808 811 # file is missing
809 812 return False
810 813
811 814 __bool__ = __nonzero__
812 815
813 816 def __bytes__(self):
814 817 try:
815 818 return b"%s@%s" % (self.path(), self._changectx)
816 819 except error.LookupError:
817 820 return b"%s@???" % self.path()
818 821
819 822 __str__ = encoding.strmethod(__bytes__)
820 823
821 824 def __repr__(self):
822 825 return "<%s %s>" % (type(self).__name__, str(self))
823 826
824 827 def __hash__(self):
825 828 try:
826 829 return hash((self._path, self._filenode))
827 830 except AttributeError:
828 831 return id(self)
829 832
830 833 def __eq__(self, other):
831 834 try:
832 835 return (
833 836 type(self) == type(other)
834 837 and self._path == other._path
835 838 and self._filenode == other._filenode
836 839 )
837 840 except AttributeError:
838 841 return False
839 842
840 843 def __ne__(self, other):
841 844 return not (self == other)
842 845
843 846 def filerev(self):
844 847 return self._filerev
845 848
846 849 def filenode(self):
847 850 return self._filenode
848 851
849 852 @propertycache
850 853 def _flags(self):
851 854 return self._changectx.flags(self._path)
852 855
853 856 def flags(self):
854 857 return self._flags
855 858
856 859 def filelog(self):
857 860 return self._filelog
858 861
859 862 def rev(self):
860 863 return self._changeid
861 864
862 865 def linkrev(self):
863 866 return self._filelog.linkrev(self._filerev)
864 867
865 868 def node(self):
866 869 return self._changectx.node()
867 870
868 871 def hex(self):
869 872 return self._changectx.hex()
870 873
871 874 def user(self):
872 875 return self._changectx.user()
873 876
874 877 def date(self):
875 878 return self._changectx.date()
876 879
877 880 def files(self):
878 881 return self._changectx.files()
879 882
880 883 def description(self):
881 884 return self._changectx.description()
882 885
883 886 def branch(self):
884 887 return self._changectx.branch()
885 888
886 889 def extra(self):
887 890 return self._changectx.extra()
888 891
889 892 def phase(self):
890 893 return self._changectx.phase()
891 894
892 895 def phasestr(self):
893 896 return self._changectx.phasestr()
894 897
895 898 def obsolete(self):
896 899 return self._changectx.obsolete()
897 900
898 901 def instabilities(self):
899 902 return self._changectx.instabilities()
900 903
901 904 def manifest(self):
902 905 return self._changectx.manifest()
903 906
904 907 def changectx(self):
905 908 return self._changectx
906 909
907 910 def renamed(self):
908 911 return self._copied
909 912
910 913 def copysource(self):
911 914 return self._copied and self._copied[0]
912 915
913 916 def repo(self):
914 917 return self._repo
915 918
916 919 def size(self):
917 920 return len(self.data())
918 921
919 922 def path(self):
920 923 return self._path
921 924
922 925 def isbinary(self):
923 926 try:
924 927 return stringutil.binary(self.data())
925 928 except IOError:
926 929 return False
927 930
928 931 def isexec(self):
929 932 return b'x' in self.flags()
930 933
931 934 def islink(self):
932 935 return b'l' in self.flags()
933 936
934 937 def isabsent(self):
935 938 """whether this filectx represents a file not in self._changectx
936 939
937 940 This is mainly for merge code to detect change/delete conflicts. This is
938 941 expected to be True for all subclasses of basectx."""
939 942 return False
940 943
941 944 _customcmp = False
942 945
943 946 def cmp(self, fctx):
944 947 """compare with other file context
945 948
946 949 returns True if different than fctx.
947 950 """
948 951 if fctx._customcmp:
949 952 return fctx.cmp(self)
950 953
951 954 if self._filenode is None:
952 955 raise error.ProgrammingError(
953 956 b'filectx.cmp() must be reimplemented if not backed by revlog'
954 957 )
955 958
956 959 if fctx._filenode is None:
957 960 if self._repo._encodefilterpats:
958 961 # can't rely on size() because wdir content may be decoded
959 962 return self._filelog.cmp(self._filenode, fctx.data())
960 963 if self.size() - 4 == fctx.size():
961 964 # size() can match:
962 965 # if file data starts with '\1\n', empty metadata block is
963 966 # prepended, which adds 4 bytes to filelog.size().
964 967 return self._filelog.cmp(self._filenode, fctx.data())
965 968 if self.size() == fctx.size():
966 969 # size() matches: need to compare content
967 970 return self._filelog.cmp(self._filenode, fctx.data())
968 971
969 972 # size() differs
970 973 return True
971 974
972 975 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
973 976 """return the first ancestor of <srcrev> introducing <fnode>
974 977
975 978 If the linkrev of the file revision does not point to an ancestor of
976 979 srcrev, we'll walk down the ancestors until we find one introducing
977 980 this file revision.
978 981
979 982 :srcrev: the changeset revision we search ancestors from
980 983 :inclusive: if true, the src revision will also be checked
981 984 :stoprev: an optional revision to stop the walk at. If no introduction
982 985 of this file content could be found before this floor
983 986 revision, the function will return "None" and stop its
984 987 iteration.
985 988 """
986 989 repo = self._repo
987 990 cl = repo.unfiltered().changelog
988 991 mfl = repo.manifestlog
989 992 # fetch the linkrev
990 993 lkr = self.linkrev()
991 994 if srcrev == lkr:
992 995 return lkr
993 996 # hack to reuse ancestor computation when searching for renames
994 997 memberanc = getattr(self, '_ancestrycontext', None)
995 998 iteranc = None
996 999 if srcrev is None:
997 1000 # wctx case, used by workingfilectx during mergecopy
998 1001 revs = [p.rev() for p in self._repo[None].parents()]
999 1002 inclusive = True # we skipped the real (revless) source
1000 1003 else:
1001 1004 revs = [srcrev]
1002 1005 if memberanc is None:
1003 1006 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1004 1007 # check if this linkrev is an ancestor of srcrev
1005 1008 if lkr not in memberanc:
1006 1009 if iteranc is None:
1007 1010 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1008 1011 fnode = self._filenode
1009 1012 path = self._path
1010 1013 for a in iteranc:
1011 1014 if stoprev is not None and a < stoprev:
1012 1015 return None
1013 1016 ac = cl.read(a) # get changeset data (we avoid object creation)
1014 1017 if path in ac[3]: # checking the 'files' field.
1015 1018 # The file has been touched, check if the content is
1016 1019 # similar to the one we search for.
1017 1020 if fnode == mfl[ac[0]].readfast().get(path):
1018 1021 return a
1019 1022 # In theory, we should never get out of that loop without a result.
1020 1023 # But if the manifest uses a buggy file revision (not a child of the
1021 1024 # one it replaces) we could. Such a buggy situation will likely
1022 1025 # result in a crash somewhere else at some point.
1023 1026 return lkr
1024 1027
1025 1028 def isintroducedafter(self, changelogrev):
1026 1029 """True if a filectx has been introduced after a given floor revision
1027 1030 """
1028 1031 if self.linkrev() >= changelogrev:
1029 1032 return True
1030 1033 introrev = self._introrev(stoprev=changelogrev)
1031 1034 if introrev is None:
1032 1035 return False
1033 1036 return introrev >= changelogrev
1034 1037
1035 1038 def introrev(self):
1036 1039 """return the rev of the changeset which introduced this file revision
1037 1040
1038 1041 This method is different from linkrev because it takes into account the
1039 1042 changeset the filectx was created from. It ensures the returned
1040 1043 revision is one of its ancestors. This prevents bugs from
1041 1044 'linkrev-shadowing' when a file revision is used by multiple
1042 1045 changesets.
1043 1046 """
1044 1047 return self._introrev()
1045 1048
1046 1049 def _introrev(self, stoprev=None):
1047 1050 """
1048 1051 Same as `introrev`, but with an extra argument to limit the
1049 1052 changelog iteration range in some internal use cases.
1050 1053
1051 1054 If `stoprev` is set, the `introrev` will not be searched past that
1052 1055 `stoprev` revision and "None" might be returned. This is useful to
1053 1056 limit the iteration range.
1054 1057 """
1055 1058 toprev = None
1056 1059 attrs = vars(self)
1057 1060 if '_changeid' in attrs:
1058 1061 # We have a cached value already
1059 1062 toprev = self._changeid
1060 1063 elif '_changectx' in attrs:
1061 1064 # We know which changelog entry we are coming from
1062 1065 toprev = self._changectx.rev()
1063 1066
1064 1067 if toprev is not None:
1065 1068 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1066 1069 elif '_descendantrev' in attrs:
1067 1070 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1068 1071 # be nice and cache the result of the computation
1069 1072 if introrev is not None:
1070 1073 self._changeid = introrev
1071 1074 return introrev
1072 1075 else:
1073 1076 return self.linkrev()
1074 1077
1075 1078 def introfilectx(self):
1076 1079 """Return filectx having identical contents, but pointing to the
1077 1080 changeset revision where this filectx was introduced"""
1078 1081 introrev = self.introrev()
1079 1082 if self.rev() == introrev:
1080 1083 return self
1081 1084 return self.filectx(self.filenode(), changeid=introrev)
1082 1085
1083 1086 def _parentfilectx(self, path, fileid, filelog):
1084 1087 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1085 1088 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1086 1089 if '_changeid' in vars(self) or '_changectx' in vars(self):
1087 1090 # If self is associated with a changeset (probably explicitly
1088 1091 # fed), ensure the created filectx is associated with a
1089 1092 # changeset that is an ancestor of self.changectx.
1090 1093 # This lets us later use _adjustlinkrev to get a correct link.
1091 1094 fctx._descendantrev = self.rev()
1092 1095 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1093 1096 elif '_descendantrev' in vars(self):
1094 1097 # Otherwise propagate _descendantrev if we have one associated.
1095 1098 fctx._descendantrev = self._descendantrev
1096 1099 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1097 1100 return fctx
1098 1101
1099 1102 def parents(self):
1100 1103 _path = self._path
1101 1104 fl = self._filelog
1102 1105 parents = self._filelog.parents(self._filenode)
1103 1106 pl = [(_path, node, fl) for node in parents if node != nullid]
1104 1107
1105 1108 r = fl.renamed(self._filenode)
1106 1109 if r:
1107 1110 # - In the simple rename case, both parents are nullid and pl is empty.
1108 1111 # - In case of a merge, only one of the parents is nullid and should
1109 1112 # be replaced with the rename information. This parent is -always-
1110 1113 # the first one.
1111 1114 #
1112 1115 # As nullid parents have always been filtered out in the previous list
1113 1116 # comprehension, inserting at 0 will always result in replacing the
1114 1117 # first nullid parent with the rename information.
1115 1118 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1116 1119
1117 1120 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1118 1121
1119 1122 def p1(self):
1120 1123 return self.parents()[0]
1121 1124
1122 1125 def p2(self):
1123 1126 p = self.parents()
1124 1127 if len(p) == 2:
1125 1128 return p[1]
1126 1129 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1127 1130
1128 1131 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1129 1132 """Returns a list of annotateline objects for each line in the file
1130 1133
1131 1134 - line.fctx is the filectx of the node where that line was last changed
1132 1135 - line.lineno is the line number at the first appearance in the managed
1133 1136 file
1134 1137 - line.text is the data on that line (including newline character)
1135 1138 """
1136 1139 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1137 1140
1138 1141 def parents(f):
1139 1142 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1140 1143 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1141 1144 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1142 1145 # isn't an ancestor of the srcrev.
1143 1146 f._changeid
1144 1147 pl = f.parents()
1145 1148
1146 1149 # Don't return renamed parents if we aren't following.
1147 1150 if not follow:
1148 1151 pl = [p for p in pl if p.path() == f.path()]
1149 1152
1150 1153 # renamed filectx won't have a filelog yet, so set it
1151 1154 # from the cache to save time
1152 1155 for p in pl:
1153 1156 if not '_filelog' in p.__dict__:
1154 1157 p._filelog = getlog(p.path())
1155 1158
1156 1159 return pl
1157 1160
1158 1161 # use linkrev to find the first changeset where self appeared
1159 1162 base = self.introfilectx()
1160 1163 if getattr(base, '_ancestrycontext', None) is None:
1161 1164 cl = self._repo.changelog
1162 1165 if base.rev() is None:
1163 1166 # wctx is not inclusive, but works because _ancestrycontext
1164 1167 # is used to test filelog revisions
1165 1168 ac = cl.ancestors(
1166 1169 [p.rev() for p in base.parents()], inclusive=True
1167 1170 )
1168 1171 else:
1169 1172 ac = cl.ancestors([base.rev()], inclusive=True)
1170 1173 base._ancestrycontext = ac
1171 1174
1172 1175 return dagop.annotate(
1173 1176 base, parents, skiprevs=skiprevs, diffopts=diffopts
1174 1177 )
1175 1178
1176 1179 def ancestors(self, followfirst=False):
1177 1180 visit = {}
1178 1181 c = self
1179 1182 if followfirst:
1180 1183 cut = 1
1181 1184 else:
1182 1185 cut = None
1183 1186
1184 1187 while True:
1185 1188 for parent in c.parents()[:cut]:
1186 1189 visit[(parent.linkrev(), parent.filenode())] = parent
1187 1190 if not visit:
1188 1191 break
1189 1192 c = visit.pop(max(visit))
1190 1193 yield c
1191 1194
1192 1195 def decodeddata(self):
1193 1196 """Returns `data()` after running repository decoding filters.
1194 1197
1195 1198 This is often equivalent to how the data would be expressed on disk.
1196 1199 """
1197 1200 return self._repo.wwritedata(self.path(), self.data())
1198 1201
1199 1202
1200 1203 class filectx(basefilectx):
1201 1204 """A filecontext object makes access to data related to a particular
1202 1205 filerevision convenient."""
1203 1206
1204 1207 def __init__(
1205 1208 self,
1206 1209 repo,
1207 1210 path,
1208 1211 changeid=None,
1209 1212 fileid=None,
1210 1213 filelog=None,
1211 1214 changectx=None,
1212 1215 ):
1213 1216 """changeid must be a revision number, if specified.
1214 1217 fileid can be a file revision or node."""
1215 1218 self._repo = repo
1216 1219 self._path = path
1217 1220
1218 1221 assert (
1219 1222 changeid is not None or fileid is not None or changectx is not None
1220 1223 ), (
1221 1224 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1222 1225 % (changeid, fileid, changectx,)
1223 1226 )
1224 1227
1225 1228 if filelog is not None:
1226 1229 self._filelog = filelog
1227 1230
1228 1231 if changeid is not None:
1229 1232 self._changeid = changeid
1230 1233 if changectx is not None:
1231 1234 self._changectx = changectx
1232 1235 if fileid is not None:
1233 1236 self._fileid = fileid
1234 1237
1235 1238 @propertycache
1236 1239 def _changectx(self):
1237 1240 try:
1238 1241 return self._repo[self._changeid]
1239 1242 except error.FilteredRepoLookupError:
1240 1243 # Linkrev may point to any revision in the repository. When the
1241 1244 # repository is filtered this may lead to `filectx` trying to build
1242 1245 # `changectx` for a filtered revision. In such a case we fall back to
1243 1246 # creating `changectx` on the unfiltered version of the repository.
1244 1247 # This fallback should not be an issue because `changectx` from
1245 1248 # `filectx` are not used in complex operations that care about
1246 1249 # filtering.
1247 1250 #
1248 1251 # This fallback is a cheap and dirty fix that prevents several
1249 1252 # crashes. It does not ensure the behavior is correct. However the
1250 1253 # behavior was not correct before filtering either and "incorrect
1251 1254 # behavior" is seen as better than "crash".
1252 1255 #
1253 1256 # Linkrevs have several serious problems with filtering that are
1254 1257 # complicated to solve. Proper handling of the issue here should be
1255 1258 # considered when a solution to the linkrev issues is on the table.
1256 1259 return self._repo.unfiltered()[self._changeid]
1257 1260
1258 1261 def filectx(self, fileid, changeid=None):
1259 1262 '''opens an arbitrary revision of the file without
1260 1263 opening a new filelog'''
1261 1264 return filectx(
1262 1265 self._repo,
1263 1266 self._path,
1264 1267 fileid=fileid,
1265 1268 filelog=self._filelog,
1266 1269 changeid=changeid,
1267 1270 )
1268 1271
1269 1272 def rawdata(self):
1270 1273 return self._filelog.rawdata(self._filenode)
1271 1274
1272 1275 def rawflags(self):
1273 1276 """low-level revlog flags"""
1274 1277 return self._filelog.flags(self._filerev)
1275 1278
1276 1279 def data(self):
1277 1280 try:
1278 1281 return self._filelog.read(self._filenode)
1279 1282 except error.CensoredNodeError:
1280 1283 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1281 1284 return b""
1282 1285 raise error.Abort(
1283 1286 _(b"censored node: %s") % short(self._filenode),
1284 1287 hint=_(b"set censor.policy to ignore errors"),
1285 1288 )
1286 1289
1287 1290 def size(self):
1288 1291 return self._filelog.size(self._filerev)
1289 1292
1290 1293 @propertycache
1291 1294 def _copied(self):
1292 1295 """check if file was actually renamed in this changeset revision
1293 1296
1294 1297 If a rename is logged in the file revision, we report a copy for the
1295 1298 changeset only if the file revision's linkrev points back to the changeset
1296 1299 in question or both changeset parents contain different file revisions.
1297 1300 """
1298 1301
1299 1302 renamed = self._filelog.renamed(self._filenode)
1300 1303 if not renamed:
1301 1304 return None
1302 1305
1303 1306 if self.rev() == self.linkrev():
1304 1307 return renamed
1305 1308
1306 1309 name = self.path()
1307 1310 fnode = self._filenode
1308 1311 for p in self._changectx.parents():
1309 1312 try:
1310 1313 if fnode == p.filenode(name):
1311 1314 return None
1312 1315 except error.LookupError:
1313 1316 pass
1314 1317 return renamed
1315 1318
1316 1319 def children(self):
1317 1320 # hard for renames
1318 1321 c = self._filelog.children(self._filenode)
1319 1322 return [
1320 1323 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1321 1324 for x in c
1322 1325 ]
1323 1326
1324 1327
1325 1328 class committablectx(basectx):
1326 1329 """A committablectx object provides common functionality for a context that
1327 1330 wants the ability to commit, e.g. workingctx or memctx."""
1328 1331
1329 1332 def __init__(
1330 1333 self,
1331 1334 repo,
1332 1335 text=b"",
1333 1336 user=None,
1334 1337 date=None,
1335 1338 extra=None,
1336 1339 changes=None,
1337 1340 branch=None,
1338 1341 ):
1339 1342 super(committablectx, self).__init__(repo)
1340 1343 self._rev = None
1341 1344 self._node = None
1342 1345 self._text = text
1343 1346 if date:
1344 1347 self._date = dateutil.parsedate(date)
1345 1348 if user:
1346 1349 self._user = user
1347 1350 if changes:
1348 1351 self._status = changes
1349 1352
1350 1353 self._extra = {}
1351 1354 if extra:
1352 1355 self._extra = extra.copy()
1353 1356 if branch is not None:
1354 1357 self._extra[b'branch'] = encoding.fromlocal(branch)
1355 1358 if not self._extra.get(b'branch'):
1356 1359 self._extra[b'branch'] = b'default'
1357 1360
1358 1361 def __bytes__(self):
1359 1362 return bytes(self._parents[0]) + b"+"
1360 1363
1361 1364 __str__ = encoding.strmethod(__bytes__)
1362 1365
1363 1366 def __nonzero__(self):
1364 1367 return True
1365 1368
1366 1369 __bool__ = __nonzero__
1367 1370
1368 1371 @propertycache
1369 1372 def _status(self):
1370 1373 return self._repo.status()
1371 1374
1372 1375 @propertycache
1373 1376 def _user(self):
1374 1377 return self._repo.ui.username()
1375 1378
1376 1379 @propertycache
1377 1380 def _date(self):
1378 1381 ui = self._repo.ui
1379 1382 date = ui.configdate(b'devel', b'default-date')
1380 1383 if date is None:
1381 1384 date = dateutil.makedate()
1382 1385 return date
1383 1386
1384 1387 def subrev(self, subpath):
1385 1388 return None
1386 1389
1387 1390 def manifestnode(self):
1388 1391 return None
1389 1392
1390 1393 def user(self):
1391 1394 return self._user or self._repo.ui.username()
1392 1395
1393 1396 def date(self):
1394 1397 return self._date
1395 1398
1396 1399 def description(self):
1397 1400 return self._text
1398 1401
1399 1402 def files(self):
1400 1403 return sorted(
1401 1404 self._status.modified + self._status.added + self._status.removed
1402 1405 )
1403 1406
1404 1407 def modified(self):
1405 1408 return self._status.modified
1406 1409
1407 1410 def added(self):
1408 1411 return self._status.added
1409 1412
1410 1413 def removed(self):
1411 1414 return self._status.removed
1412 1415
1413 1416 def deleted(self):
1414 1417 return self._status.deleted
1415 1418
1416 1419 filesmodified = modified
1417 1420 filesadded = added
1418 1421 filesremoved = removed
1419 1422
1420 1423 def branch(self):
1421 1424 return encoding.tolocal(self._extra[b'branch'])
1422 1425
1423 1426 def closesbranch(self):
1424 1427 return b'close' in self._extra
1425 1428
1426 1429 def extra(self):
1427 1430 return self._extra
1428 1431
1429 1432 def isinmemory(self):
1430 1433 return False
1431 1434
1432 1435 def tags(self):
1433 1436 return []
1434 1437
1435 1438 def bookmarks(self):
1436 1439 b = []
1437 1440 for p in self.parents():
1438 1441 b.extend(p.bookmarks())
1439 1442 return b
1440 1443
1441 1444 def phase(self):
1442 1445 phase = phases.newcommitphase(self._repo.ui)
1443 1446 for p in self.parents():
1444 1447 phase = max(phase, p.phase())
1445 1448 return phase
1446 1449
1447 1450 def hidden(self):
1448 1451 return False
1449 1452
1450 1453 def children(self):
1451 1454 return []
1452 1455
1453 1456 def ancestor(self, c2):
1454 1457 """return the "best" ancestor context of self and c2"""
1455 1458 return self._parents[0].ancestor(c2) # punt on two parents for now
1456 1459
1457 1460 def ancestors(self):
1458 1461 for p in self._parents:
1459 1462 yield p
1460 1463 for a in self._repo.changelog.ancestors(
1461 1464 [p.rev() for p in self._parents]
1462 1465 ):
1463 1466 yield self._repo[a]
1464 1467
1465 1468 def markcommitted(self, node):
1466 1469 """Perform post-commit cleanup necessary after committing this ctx
1467 1470
1468 1471 Specifically, this updates backing stores this working context
1469 1472 wraps to reflect the fact that the changes reflected by this
1470 1473 workingctx have been committed. For example, it marks
1471 1474 modified and added files as normal in the dirstate.
1472 1475
1473 1476 """
1474 1477
1475 1478 def dirty(self, missing=False, merge=True, branch=True):
1476 1479 return False
1477 1480
1478 1481
1479 1482 class workingctx(committablectx):
1480 1483 """A workingctx object makes access to data related to
1481 1484 the current working directory convenient.
1482 1485 date - any valid date string or (unixtime, offset), or None.
1483 1486 user - username string, or None.
1484 1487 extra - a dictionary of extra values, or None.
1485 1488 changes - a list of file lists as returned by localrepo.status()
1486 1489 or None to use the repository status.
1487 1490 """
1488 1491
1489 1492 def __init__(
1490 1493 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1491 1494 ):
1492 1495 branch = None
1493 1496 if not extra or b'branch' not in extra:
1494 1497 try:
1495 1498 branch = repo.dirstate.branch()
1496 1499 except UnicodeDecodeError:
1497 1500 raise error.Abort(_(b'branch name not in UTF-8!'))
1498 1501 super(workingctx, self).__init__(
1499 1502 repo, text, user, date, extra, changes, branch=branch
1500 1503 )
1501 1504
1502 1505 def __iter__(self):
1503 1506 d = self._repo.dirstate
1504 1507 for f in d:
1505 1508 if d[f] != b'r':
1506 1509 yield f
1507 1510
1508 1511 def __contains__(self, key):
1509 1512 return self._repo.dirstate[key] not in b"?r"
1510 1513
1511 1514 def hex(self):
1512 1515 return wdirhex
1513 1516
1514 1517 @propertycache
1515 1518 def _parents(self):
1516 1519 p = self._repo.dirstate.parents()
1517 1520 if p[1] == nullid:
1518 1521 p = p[:-1]
1519 1522 # use unfiltered repo to delay/avoid loading obsmarkers
1520 1523 unfi = self._repo.unfiltered()
1521 1524 return [
1522 1525 changectx(
1523 1526 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1524 1527 )
1525 1528 for n in p
1526 1529 ]
1527 1530
1528 1531 def _fileinfo(self, path):
1529 1532 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1530 1533 self._manifest
1531 1534 return super(workingctx, self)._fileinfo(path)
1532 1535
1533 1536 def _buildflagfunc(self):
1534 1537 # Create a fallback function for getting file flags when the
1535 1538 # filesystem doesn't support them
1536 1539
1537 1540 copiesget = self._repo.dirstate.copies().get
1538 1541 parents = self.parents()
1539 1542 if len(parents) < 2:
1540 1543 # when we have one parent, it's easy: copy from parent
1541 1544 man = parents[0].manifest()
1542 1545
1543 1546 def func(f):
1544 1547 f = copiesget(f, f)
1545 1548 return man.flags(f)
1546 1549
1547 1550 else:
1548 1551 # merges are tricky: we try to reconstruct the unstored
1549 1552 # result from the merge (issue1802)
1550 1553 p1, p2 = parents
1551 1554 pa = p1.ancestor(p2)
1552 1555 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1553 1556
1554 1557 def func(f):
1555 1558 f = copiesget(f, f) # may be wrong for merges with copies
1556 1559 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1557 1560 if fl1 == fl2:
1558 1561 return fl1
1559 1562 if fl1 == fla:
1560 1563 return fl2
1561 1564 if fl2 == fla:
1562 1565 return fl1
1563 1566 return b'' # punt for conflicts
1564 1567
1565 1568 return func
1566 1569
1567 1570 @propertycache
1568 1571 def _flagfunc(self):
1569 1572 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1570 1573
1571 1574 def flags(self, path):
1572 1575 if '_manifest' in self.__dict__:
1573 1576 try:
1574 1577 return self._manifest.flags(path)
1575 1578 except KeyError:
1576 1579 return b''
1577 1580
1578 1581 try:
1579 1582 return self._flagfunc(path)
1580 1583 except OSError:
1581 1584 return b''
1582 1585
1583 1586 def filectx(self, path, filelog=None):
1584 1587 """get a file context from the working directory"""
1585 1588 return workingfilectx(
1586 1589 self._repo, path, workingctx=self, filelog=filelog
1587 1590 )
1588 1591
1589 1592 def dirty(self, missing=False, merge=True, branch=True):
1590 1593 """check whether a working directory is modified"""
1591 1594 # check subrepos first
1592 1595 for s in sorted(self.substate):
1593 1596 if self.sub(s).dirty(missing=missing):
1594 1597 return True
1595 1598 # check current working dir
1596 1599 return (
1597 1600 (merge and self.p2())
1598 1601 or (branch and self.branch() != self.p1().branch())
1599 1602 or self.modified()
1600 1603 or self.added()
1601 1604 or self.removed()
1602 1605 or (missing and self.deleted())
1603 1606 )
1604 1607
1605 1608 def add(self, list, prefix=b""):
1606 1609 with self._repo.wlock():
1607 1610 ui, ds = self._repo.ui, self._repo.dirstate
1608 1611 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1609 1612 rejected = []
1610 1613 lstat = self._repo.wvfs.lstat
1611 1614 for f in list:
1612 1615 # ds.pathto() returns an absolute file when this is invoked from
1613 1616 # the keyword extension. That gets flagged as non-portable on
1614 1617 # Windows, since it contains the drive letter and colon.
1615 1618 scmutil.checkportable(ui, os.path.join(prefix, f))
1616 1619 try:
1617 1620 st = lstat(f)
1618 1621 except OSError:
1619 1622 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1620 1623 rejected.append(f)
1621 1624 continue
1622 1625 limit = ui.configbytes(b'ui', b'large-file-limit')
1623 1626 if limit != 0 and st.st_size > limit:
1624 1627 ui.warn(
1625 1628 _(
1626 1629 b"%s: up to %d MB of RAM may be required "
1627 1630 b"to manage this file\n"
1628 1631 b"(use 'hg revert %s' to cancel the "
1629 1632 b"pending addition)\n"
1630 1633 )
1631 1634 % (f, 3 * st.st_size // 1000000, uipath(f))
1632 1635 )
1633 1636 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1634 1637 ui.warn(
1635 1638 _(
1636 1639 b"%s not added: only files and symlinks "
1637 1640 b"supported currently\n"
1638 1641 )
1639 1642 % uipath(f)
1640 1643 )
1641 1644 rejected.append(f)
1642 1645 elif ds[f] in b'amn':
1643 1646 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1644 1647 elif ds[f] == b'r':
1645 1648 ds.normallookup(f)
1646 1649 else:
1647 1650 ds.add(f)
1648 1651 return rejected
1649 1652
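# Illustration of the warning above: assuming the default
# ui.large-file-limit of 10 MB, adding a 200000000-byte file prints
# "up to 600 MB of RAM may be required", since the estimate used is
# 3 * st.st_size // 1000000.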
1650 1653 def forget(self, files, prefix=b""):
1651 1654 with self._repo.wlock():
1652 1655 ds = self._repo.dirstate
1653 1656 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1654 1657 rejected = []
1655 1658 for f in files:
1656 1659 if f not in ds:
1657 1660 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1658 1661 rejected.append(f)
1659 1662 elif ds[f] != b'a':
1660 1663 ds.remove(f)
1661 1664 else:
1662 1665 ds.drop(f)
1663 1666 return rejected
1664 1667
1665 1668 def copy(self, source, dest):
1666 1669 try:
1667 1670 st = self._repo.wvfs.lstat(dest)
1668 1671 except OSError as err:
1669 1672 if err.errno != errno.ENOENT:
1670 1673 raise
1671 1674 self._repo.ui.warn(
1672 1675 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1673 1676 )
1674 1677 return
1675 1678 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1676 1679 self._repo.ui.warn(
1677 1680 _(b"copy failed: %s is not a file or a symbolic link\n")
1678 1681 % self._repo.dirstate.pathto(dest)
1679 1682 )
1680 1683 else:
1681 1684 with self._repo.wlock():
1682 1685 ds = self._repo.dirstate
1683 1686 if ds[dest] in b'?':
1684 1687 ds.add(dest)
1685 1688 elif ds[dest] in b'r':
1686 1689 ds.normallookup(dest)
1687 1690 ds.copy(source, dest)
1688 1691
1689 1692 def match(
1690 1693 self,
1691 1694 pats=None,
1692 1695 include=None,
1693 1696 exclude=None,
1694 1697 default=b'glob',
1695 1698 listsubrepos=False,
1696 1699 badfn=None,
1700 cwd=None,
1697 1701 ):
1698 1702 r = self._repo
1703 if not cwd:
1704 cwd = r.getcwd()
1699 1705
1700 1706 # Only a case insensitive filesystem needs magic to translate user input
1701 1707 # to actual case in the filesystem.
1702 1708 icasefs = not util.fscasesensitive(r.root)
1703 1709 return matchmod.match(
1704 1710 r.root,
1705 r.getcwd(),
1711 cwd,
1706 1712 pats,
1707 1713 include,
1708 1714 exclude,
1709 1715 default,
1710 1716 auditor=r.auditor,
1711 1717 ctx=self,
1712 1718 listsubrepos=listsubrepos,
1713 1719 badfn=badfn,
1714 1720 icasefs=icasefs,
1715 1721 )
1716 1722
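# Hedged usage sketch of the ``cwd`` parameter (names illustrative):
#
#   >>> wctx = repo[None]
#   >>> # resolve patterns against the repo root rather than the
#   >>> # process's current working directory:
#   >>> m = wctx.match([b'glob:*.py'], cwd=repo.root)
#   >>> m(b'setup.py')
#   True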
1717 1723 def _filtersuspectsymlink(self, files):
1718 1724 if not files or self._repo.dirstate._checklink:
1719 1725 return files
1720 1726
1721 1727 # Symlink placeholders may get non-symlink-like contents
1722 1728 # via user error or dereferencing by NFS or Samba servers,
1723 1729 # so we filter out any placeholders that don't look like a
1724 1730 # symlink
1725 1731 sane = []
1726 1732 for f in files:
1727 1733 if self.flags(f) == b'l':
1728 1734 d = self[f].data()
1729 1735 if (
1730 1736 d == b''
1731 1737 or len(d) >= 1024
1732 1738 or b'\n' in d
1733 1739 or stringutil.binary(d)
1734 1740 ):
1735 1741 self._repo.ui.debug(
1736 1742 b'ignoring suspect symlink placeholder "%s"\n' % f
1737 1743 )
1738 1744 continue
1739 1745 sane.append(f)
1740 1746 return sane
1741 1747
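# For example, a placeholder whose content is b'target/file' survives
# this filter, while one holding kilobytes of data, multiple lines, or
# binary content is assumed to be real file content that replaced the
# placeholder, and is skipped.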
1742 1748 def _checklookup(self, files):
1743 1749 # check for any possibly clean files
1744 1750 if not files:
1745 1751 return [], [], []
1746 1752
1747 1753 modified = []
1748 1754 deleted = []
1749 1755 fixup = []
1750 1756 pctx = self._parents[0]
1751 1757 # do a full compare of any files that might have changed
1752 1758 for f in sorted(files):
1753 1759 try:
1754 1760 # This will return True for a file that got replaced by a
1755 1761 # directory in the interim, but fixing that is pretty hard.
1756 1762 if (
1757 1763 f not in pctx
1758 1764 or self.flags(f) != pctx.flags(f)
1759 1765 or pctx[f].cmp(self[f])
1760 1766 ):
1761 1767 modified.append(f)
1762 1768 else:
1763 1769 fixup.append(f)
1764 1770 except (IOError, OSError):
1765 1771 # A file became inaccessible in between? Mark it as deleted,
1766 1772 # matching dirstate behavior (issue5584).
1767 1773 # The dirstate has more complex behavior around whether a
1768 1774 # missing file matches a directory, etc, but we don't need to
1769 1775 # bother with that: if f has made it to this point, we're sure
1770 1776 # it's in the dirstate.
1771 1777 deleted.append(f)
1772 1778
1773 1779 return modified, deleted, fixup
1774 1780
1775 1781 def _poststatusfixup(self, status, fixup):
1776 1782 """update dirstate for files that are actually clean"""
1777 1783 poststatus = self._repo.postdsstatus()
1778 1784 if fixup or poststatus:
1779 1785 try:
1780 1786 oldid = self._repo.dirstate.identity()
1781 1787
1782 1788 # updating the dirstate is optional
1783 1789 # so we don't wait on the lock
1784 1790 # wlock can invalidate the dirstate, so cache normal _after_
1785 1791 # taking the lock
1786 1792 with self._repo.wlock(False):
1787 1793 if self._repo.dirstate.identity() == oldid:
1788 1794 if fixup:
1789 1795 normal = self._repo.dirstate.normal
1790 1796 for f in fixup:
1791 1797 normal(f)
1792 1798 # write changes out explicitly, because nesting
1793 1799 # wlock at runtime may prevent 'wlock.release()'
1794 1800 # after this block from doing so for subsequent
1795 1801 # changing files
1796 1802 tr = self._repo.currenttransaction()
1797 1803 self._repo.dirstate.write(tr)
1798 1804
1799 1805 if poststatus:
1800 1806 for ps in poststatus:
1801 1807 ps(self, status)
1802 1808 else:
1803 1809 # in this case, writing changes out breaks
1804 1810 # consistency, because .hg/dirstate was
1805 1811 # already changed simultaneously after last
1806 1812 # caching (see also issue5584 for detail)
1807 1813 self._repo.ui.debug(
1808 1814 b'skip updating dirstate: identity mismatch\n'
1809 1815 )
1810 1816 except error.LockError:
1811 1817 pass
1812 1818 finally:
1813 1819 # Even if the wlock couldn't be grabbed, clear out the list.
1814 1820 self._repo.clearpostdsstatus()
1815 1821
1816 1822 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1817 1823 '''Gets the status from the dirstate -- internal use only.'''
1818 1824 subrepos = []
1819 1825 if b'.hgsub' in self:
1820 1826 subrepos = sorted(self.substate)
1821 1827 cmp, s = self._repo.dirstate.status(
1822 1828 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1823 1829 )
1824 1830
1825 1831 # check for any possibly clean files
1826 1832 fixup = []
1827 1833 if cmp:
1828 1834 modified2, deleted2, fixup = self._checklookup(cmp)
1829 1835 s.modified.extend(modified2)
1830 1836 s.deleted.extend(deleted2)
1831 1837
1832 1838 if fixup and clean:
1833 1839 s.clean.extend(fixup)
1834 1840
1835 1841 self._poststatusfixup(s, fixup)
1836 1842
1837 1843 if match.always():
1838 1844 # cache for performance
1839 1845 if s.unknown or s.ignored or s.clean:
1840 1846 # "_status" is cached with list*=False in the normal route
1841 1847 self._status = scmutil.status(
1842 1848 s.modified, s.added, s.removed, s.deleted, [], [], []
1843 1849 )
1844 1850 else:
1845 1851 self._status = s
1846 1852
1847 1853 return s
1848 1854
1849 1855 @propertycache
1850 1856 def _copies(self):
1851 1857 p1copies = {}
1852 1858 p2copies = {}
1853 1859 parents = self._repo.dirstate.parents()
1854 1860 p1manifest = self._repo[parents[0]].manifest()
1855 1861 p2manifest = self._repo[parents[1]].manifest()
1856 1862 changedset = set(self.added()) | set(self.modified())
1857 1863 narrowmatch = self._repo.narrowmatch()
1858 1864 for dst, src in self._repo.dirstate.copies().items():
1859 1865 if dst not in changedset or not narrowmatch(dst):
1860 1866 continue
1861 1867 if src in p1manifest:
1862 1868 p1copies[dst] = src
1863 1869 elif src in p2manifest:
1864 1870 p2copies[dst] = src
1865 1871 return p1copies, p2copies
1866 1872
1867 1873 @propertycache
1868 1874 def _manifest(self):
1869 1875 """generate a manifest corresponding to the values in self._status
1870 1876
1871 1877 This reuses the file nodeids from the parent, but uses special node
1872 1878 identifiers for added and modified files. This is used by manifest
1873 1879 merge to see that files differ and by the update logic to avoid
1874 1880 deleting newly added files.
1875 1881 """
1876 1882 return self._buildstatusmanifest(self._status)
1877 1883
1878 1884 def _buildstatusmanifest(self, status):
1879 1885 """Builds a manifest that includes the given status results."""
1880 1886 parents = self.parents()
1881 1887
1882 1888 man = parents[0].manifest().copy()
1883 1889
1884 1890 ff = self._flagfunc
1885 1891 for i, l in (
1886 1892 (addednodeid, status.added),
1887 1893 (modifiednodeid, status.modified),
1888 1894 ):
1889 1895 for f in l:
1890 1896 man[f] = i
1891 1897 try:
1892 1898 man.setflag(f, ff(f))
1893 1899 except OSError:
1894 1900 pass
1895 1901
1896 1902 for f in status.deleted + status.removed:
1897 1903 if f in man:
1898 1904 del man[f]
1899 1905
1900 1906 return man
1901 1907
1902 1908 def _buildstatus(
1903 1909 self, other, s, match, listignored, listclean, listunknown
1904 1910 ):
1905 1911 """build a status with respect to another context
1906 1912
1907 1913 This includes logic for maintaining the fast path of status when
1908 1914 comparing the working directory against its parent, which is to skip
1909 1915 building a new manifest if self (working directory) is not comparing
1910 1916 against its parent (repo['.']).
1911 1917 """
1912 1918 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1913 1919 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1914 1920 # might have accidentally ended up with the entire contents of the file
1915 1921 # they are supposed to be linking to.
1916 1922 s.modified[:] = self._filtersuspectsymlink(s.modified)
1917 1923 if other != self._repo[b'.']:
1918 1924 s = super(workingctx, self)._buildstatus(
1919 1925 other, s, match, listignored, listclean, listunknown
1920 1926 )
1921 1927 return s
1922 1928
1923 1929 def _matchstatus(self, other, match):
1924 1930 """override the match method with a filter for directory patterns
1925 1931
1926 1932 We use inheritance to customize the match.bad method only in cases of
1927 1933 workingctx since it belongs only to the working directory when
1928 1934 comparing against the parent changeset.
1929 1935
1930 1936 If we aren't comparing against the working directory's parent, then we
1931 1937 just use the default match object sent to us.
1932 1938 """
1933 1939 if other != self._repo[b'.']:
1934 1940
1935 1941 def bad(f, msg):
1936 1942 # 'f' may be a directory pattern from 'match.files()',
1937 1943 # so 'f not in other' is not enough
1938 1944 if f not in other and not other.hasdir(f):
1939 1945 self._repo.ui.warn(
1940 1946 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1941 1947 )
1942 1948
1943 1949 match.bad = bad
1944 1950 return match
1945 1951
1946 1952 def walk(self, match):
1947 1953 '''Generates matching file names.'''
1948 1954 return sorted(
1949 1955 self._repo.dirstate.walk(
1950 1956 self._repo.narrowmatch(match),
1951 1957 subrepos=sorted(self.substate),
1952 1958 unknown=True,
1953 1959 ignored=False,
1954 1960 )
1955 1961 )
1956 1962
1957 1963 def matches(self, match):
1958 1964 match = self._repo.narrowmatch(match)
1959 1965 ds = self._repo.dirstate
1960 1966 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1961 1967
1962 1968 def markcommitted(self, node):
1963 1969 with self._repo.dirstate.parentchange():
1964 1970 for f in self.modified() + self.added():
1965 1971 self._repo.dirstate.normal(f)
1966 1972 for f in self.removed():
1967 1973 self._repo.dirstate.drop(f)
1968 1974 self._repo.dirstate.setparents(node)
1969 1975
1970 1976 # write changes out explicitly, because nesting wlock at
1971 1977 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1972 1978 # from immediately doing so for subsequent changing files
1973 1979 self._repo.dirstate.write(self._repo.currenttransaction())
1974 1980
1975 1981 sparse.aftercommit(self._repo, node)
1976 1982
1977 1983
1978 1984 class committablefilectx(basefilectx):
1979 1985 """A committablefilectx provides common functionality for a file context
1980 1986 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1981 1987
1982 1988 def __init__(self, repo, path, filelog=None, ctx=None):
1983 1989 self._repo = repo
1984 1990 self._path = path
1985 1991 self._changeid = None
1986 1992 self._filerev = self._filenode = None
1987 1993
1988 1994 if filelog is not None:
1989 1995 self._filelog = filelog
1990 1996 if ctx:
1991 1997 self._changectx = ctx
1992 1998
1993 1999 def __nonzero__(self):
1994 2000 return True
1995 2001
1996 2002 __bool__ = __nonzero__
1997 2003
1998 2004 def linkrev(self):
1999 2005 # linked to self._changectx no matter if file is modified or not
2000 2006 return self.rev()
2001 2007
2002 2008 def renamed(self):
2003 2009 path = self.copysource()
2004 2010 if not path:
2005 2011 return None
2006 2012 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2007 2013
2008 2014 def parents(self):
2009 2015 '''return parent filectxs, following copies if necessary'''
2010 2016
2011 2017 def filenode(ctx, path):
2012 2018 return ctx._manifest.get(path, nullid)
2013 2019
2014 2020 path = self._path
2015 2021 fl = self._filelog
2016 2022 pcl = self._changectx._parents
2017 2023 renamed = self.renamed()
2018 2024
2019 2025 if renamed:
2020 2026 pl = [renamed + (None,)]
2021 2027 else:
2022 2028 pl = [(path, filenode(pcl[0], path), fl)]
2023 2029
2024 2030 for pc in pcl[1:]:
2025 2031 pl.append((path, filenode(pc, path), fl))
2026 2032
2027 2033 return [
2028 2034 self._parentfilectx(p, fileid=n, filelog=l)
2029 2035 for p, n, l in pl
2030 2036 if n != nullid
2031 2037 ]
2032 2038
2033 2039 def children(self):
2034 2040 return []
2035 2041
2036 2042
2037 2043 class workingfilectx(committablefilectx):
2038 2044 """A workingfilectx object makes access to data related to a particular
2039 2045 file in the working directory convenient."""
2040 2046
2041 2047 def __init__(self, repo, path, filelog=None, workingctx=None):
2042 2048 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2043 2049
2044 2050 @propertycache
2045 2051 def _changectx(self):
2046 2052 return workingctx(self._repo)
2047 2053
2048 2054 def data(self):
2049 2055 return self._repo.wread(self._path)
2050 2056
2051 2057 def copysource(self):
2052 2058 return self._repo.dirstate.copied(self._path)
2053 2059
2054 2060 def size(self):
2055 2061 return self._repo.wvfs.lstat(self._path).st_size
2056 2062
2057 2063 def lstat(self):
2058 2064 return self._repo.wvfs.lstat(self._path)
2059 2065
2060 2066 def date(self):
2061 2067 t, tz = self._changectx.date()
2062 2068 try:
2063 2069 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2064 2070 except OSError as err:
2065 2071 if err.errno != errno.ENOENT:
2066 2072 raise
2067 2073 return (t, tz)
2068 2074
2069 2075 def exists(self):
2070 2076 return self._repo.wvfs.exists(self._path)
2071 2077
2072 2078 def lexists(self):
2073 2079 return self._repo.wvfs.lexists(self._path)
2074 2080
2075 2081 def audit(self):
2076 2082 return self._repo.wvfs.audit(self._path)
2077 2083
2078 2084 def cmp(self, fctx):
2079 2085 """compare with other file context
2080 2086
2081 2087 returns True if different from fctx.
2082 2088 """
2083 2089 # fctx should be a filectx (not a workingfilectx)
2084 2090 # invert comparison to reuse the same code path
2085 2091 return fctx.cmp(self)
2086 2092
2087 2093 def remove(self, ignoremissing=False):
2088 2094 """wraps unlink for a repo's working directory"""
2089 2095 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2090 2096 self._repo.wvfs.unlinkpath(
2091 2097 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2092 2098 )
2093 2099
2094 2100 def write(self, data, flags, backgroundclose=False, **kwargs):
2095 2101 """wraps repo.wwrite"""
2096 2102 return self._repo.wwrite(
2097 2103 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2098 2104 )
2099 2105
2100 2106 def markcopied(self, src):
2101 2107 """marks this file a copy of `src`"""
2102 2108 self._repo.dirstate.copy(src, self._path)
2103 2109
2104 2110 def clearunknown(self):
2105 2111 """Removes conflicting items in the working directory so that
2106 2112 ``write()`` can be called successfully.
2107 2113 """
2108 2114 wvfs = self._repo.wvfs
2109 2115 f = self._path
2110 2116 wvfs.audit(f)
2111 2117 if self._repo.ui.configbool(
2112 2118 b'experimental', b'merge.checkpathconflicts'
2113 2119 ):
2114 2120 # remove files under the directory as they should already be
2115 2121 # warned and backed up
2116 2122 if wvfs.isdir(f) and not wvfs.islink(f):
2117 2123 wvfs.rmtree(f, forcibly=True)
2118 2124 for p in reversed(list(pathutil.finddirs(f))):
2119 2125 if wvfs.isfileorlink(p):
2120 2126 wvfs.unlink(p)
2121 2127 break
2122 2128 else:
2123 2129 # don't remove files if path conflicts are not processed
2124 2130 if wvfs.isdir(f) and not wvfs.islink(f):
2125 2131 wvfs.removedirs(f)
2126 2132
2127 2133 def setflags(self, l, x):
2128 2134 self._repo.wvfs.setflags(self._path, l, x)
2129 2135
2130 2136
2131 2137 class overlayworkingctx(committablectx):
2132 2138 """Wraps another mutable context with a write-back cache that can be
2133 2139 converted into a commit context.
2134 2140
2135 2141 self._cache[path] maps to a dict with keys: {
2136 2142 'exists': bool?
2137 2143 'date': date?
2138 2144 'data': str?
2139 2145 'flags': str?
2140 2146 'copied': str? (path or None)
2141 2147 }
2142 2148 If ``exists`` is True, ``flags`` must be non-None and ``date`` is
2143 2149 non-None. If it is False, the file was deleted.
2144 2150 """
2145 2151
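# A concrete (hypothetical) cache entry for a file written in memory:
#
#   self._cache[b'foo.txt'] = {
#       b'exists': True,
#       b'data': b'new contents\n',
#       b'date': dateutil.makedate(),
#       b'flags': b'',
#       b'copied': None,
#   }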
2146 2152 def __init__(self, repo):
2147 2153 super(overlayworkingctx, self).__init__(repo)
2148 2154 self.clean()
2149 2155
2150 2156 def setbase(self, wrappedctx):
2151 2157 self._wrappedctx = wrappedctx
2152 2158 self._parents = [wrappedctx]
2153 2159 # Drop old manifest cache as it is now out of date.
2154 2160 # This is necessary when, e.g., rebasing several nodes with one
2155 2161 # ``overlayworkingctx`` (e.g. with --collapse).
2156 2162 util.clearcachedproperty(self, b'_manifest')
2157 2163
2158 2164 def data(self, path):
2159 2165 if self.isdirty(path):
2160 2166 if self._cache[path][b'exists']:
2161 2167 if self._cache[path][b'data'] is not None:
2162 2168 return self._cache[path][b'data']
2163 2169 else:
2164 2170 # Must fallback here, too, because we only set flags.
2165 2171 return self._wrappedctx[path].data()
2166 2172 else:
2167 2173 raise error.ProgrammingError(
2168 2174 b"No such file or directory: %s" % path
2169 2175 )
2170 2176 else:
2171 2177 return self._wrappedctx[path].data()
2172 2178
2173 2179 @propertycache
2174 2180 def _manifest(self):
2175 2181 parents = self.parents()
2176 2182 man = parents[0].manifest().copy()
2177 2183
2178 2184 flag = self._flagfunc
2179 2185 for path in self.added():
2180 2186 man[path] = addednodeid
2181 2187 man.setflag(path, flag(path))
2182 2188 for path in self.modified():
2183 2189 man[path] = modifiednodeid
2184 2190 man.setflag(path, flag(path))
2185 2191 for path in self.removed():
2186 2192 del man[path]
2187 2193 return man
2188 2194
2189 2195 @propertycache
2190 2196 def _flagfunc(self):
2191 2197 def f(path):
2192 2198 return self._cache[path][b'flags']
2193 2199
2194 2200 return f
2195 2201
2196 2202 def files(self):
2197 2203 return sorted(self.added() + self.modified() + self.removed())
2198 2204
2199 2205 def modified(self):
2200 2206 return [
2201 2207 f
2202 2208 for f in self._cache.keys()
2203 2209 if self._cache[f][b'exists'] and self._existsinparent(f)
2204 2210 ]
2205 2211
2206 2212 def added(self):
2207 2213 return [
2208 2214 f
2209 2215 for f in self._cache.keys()
2210 2216 if self._cache[f][b'exists'] and not self._existsinparent(f)
2211 2217 ]
2212 2218
2213 2219 def removed(self):
2214 2220 return [
2215 2221 f
2216 2222 for f in self._cache.keys()
2217 2223 if not self._cache[f][b'exists'] and self._existsinparent(f)
2218 2224 ]
2219 2225
2220 2226 def p1copies(self):
2221 2227 copies = self._wrappedctx.p1copies().copy()
2222 2228 narrowmatch = self._repo.narrowmatch()
2223 2229 for f in self._cache.keys():
2224 2230 if not narrowmatch(f):
2225 2231 continue
2226 2232 copies.pop(f, None) # delete if it exists
2227 2233 source = self._cache[f][b'copied']
2228 2234 if source:
2229 2235 copies[f] = source
2230 2236 return copies
2231 2237
2232 2238 def p2copies(self):
2233 2239 copies = self._wrappedctx.p2copies().copy()
2234 2240 narrowmatch = self._repo.narrowmatch()
2235 2241 for f in self._cache.keys():
2236 2242 if not narrowmatch(f):
2237 2243 continue
2238 2244 copies.pop(f, None) # delete if it exists
2239 2245 source = self._cache[f][b'copied']
2240 2246 if source:
2241 2247 copies[f] = source
2242 2248 return copies
2243 2249
2244 2250 def isinmemory(self):
2245 2251 return True
2246 2252
2247 2253 def filedate(self, path):
2248 2254 if self.isdirty(path):
2249 2255 return self._cache[path][b'date']
2250 2256 else:
2251 2257 return self._wrappedctx[path].date()
2252 2258
2253 2259 def markcopied(self, path, origin):
2254 2260 self._markdirty(
2255 2261 path,
2256 2262 exists=True,
2257 2263 date=self.filedate(path),
2258 2264 flags=self.flags(path),
2259 2265 copied=origin,
2260 2266 )
2261 2267
2262 2268 def copydata(self, path):
2263 2269 if self.isdirty(path):
2264 2270 return self._cache[path][b'copied']
2265 2271 else:
2266 2272 return None
2267 2273
2268 2274 def flags(self, path):
2269 2275 if self.isdirty(path):
2270 2276 if self._cache[path][b'exists']:
2271 2277 return self._cache[path][b'flags']
2272 2278 else:
2273 2279 raise error.ProgrammingError(
2274 2280 b"No such file or directory: %s" % path
2275 2281 )
2276 2282 else:
2277 2283 return self._wrappedctx[path].flags()
2278 2284
2279 2285 def __contains__(self, key):
2280 2286 if key in self._cache:
2281 2287 return self._cache[key][b'exists']
2282 2288 return key in self.p1()
2283 2289
2284 2290 def _existsinparent(self, path):
2285 2291 try:
2286 2292 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2287 2293 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2288 2294 # with an ``exists()`` function.
2289 2295 self._wrappedctx[path]
2290 2296 return True
2291 2297 except error.ManifestLookupError:
2292 2298 return False
2293 2299
2294 2300 def _auditconflicts(self, path):
2295 2301 """Replicates conflict checks done by wvfs.write().
2296 2302
2297 2303 Since we never write to the filesystem and never call `applyupdates` in
2298 2304 IMM, we'll never check that a path is actually writable -- e.g., because
2299 2305 it adds `a/foo`, but `a` is actually a file in the other commit.
2300 2306 """
2301 2307
2302 2308 def fail(path, component):
2303 2309 # p1() is the base and we're receiving "writes" for p2()'s
2304 2310 # files.
2305 2311 if b'l' in self.p1()[component].flags():
2306 2312 raise error.Abort(
2307 2313 b"error: %s conflicts with symlink %s "
2308 2314 b"in %d." % (path, component, self.p1().rev())
2309 2315 )
2310 2316 else:
2311 2317 raise error.Abort(
2312 2318 b"error: '%s' conflicts with file '%s' in "
2313 2319 b"%d." % (path, component, self.p1().rev())
2314 2320 )
2315 2321
2316 2322 # Test that each new directory to be created to write this path from p2
2317 2323 # is not a file in p1.
2318 2324 components = path.split(b'/')
2319 2325 for i in pycompat.xrange(len(components)):
2320 2326 component = b"/".join(components[0:i])
2321 2327 if component in self:
2322 2328 fail(path, component)
2323 2329
2324 2330 # Test the other direction -- that this path from p2 isn't a directory
2325 2331 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2326 2332 match = self.match([path], default=b'path')
2327 2333 matches = self.p1().manifest().matches(match)
2328 2334 mfiles = matches.keys()
2329 2335 if len(mfiles) > 0:
2330 2336 if len(mfiles) == 1 and mfiles[0] == path:
2331 2337 return
2332 2338 # omit the files which are deleted in current IMM wctx
2333 2339 mfiles = [m for m in mfiles if m in self]
2334 2340 if not mfiles:
2335 2341 return
2336 2342 raise error.Abort(
2337 2343 b"error: file '%s' cannot be written because "
2338 2344 b" '%s/' is a directory in %s (containing %d "
2339 2345 b"entries: %s)"
2340 2346 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2341 2347 )
2342 2348
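# Example of what _auditconflicts() guards against: if p1 tracks a
# file ``a`` and an in-memory merge tries to write ``a/foo``, the
# component loop above finds ``a`` in self and aborts; conversely,
# writing ``a`` while p1 tracks ``a/foo`` trips the directory check at
# the end of the method.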
2343 2349 def write(self, path, data, flags=b'', **kwargs):
2344 2350 if data is None:
2345 2351 raise error.ProgrammingError(b"data must be non-None")
2346 2352 self._auditconflicts(path)
2347 2353 self._markdirty(
2348 2354 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2349 2355 )
2350 2356
2351 2357 def setflags(self, path, l, x):
2352 2358 flag = b''
2353 2359 if l:
2354 2360 flag = b'l'
2355 2361 elif x:
2356 2362 flag = b'x'
2357 2363 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2358 2364
2359 2365 def remove(self, path):
2360 2366 self._markdirty(path, exists=False)
2361 2367
2362 2368 def exists(self, path):
2363 2369 """exists behaves like `lexists`, but needs to follow symlinks and
2364 2370 return False if they are broken.
2365 2371 """
2366 2372 if self.isdirty(path):
2367 2373 # If this path exists and is a symlink, "follow" it by calling
2368 2374 # exists on the destination path.
2369 2375 if (
2370 2376 self._cache[path][b'exists']
2371 2377 and b'l' in self._cache[path][b'flags']
2372 2378 ):
2373 2379 return self.exists(self._cache[path][b'data'].strip())
2374 2380 else:
2375 2381 return self._cache[path][b'exists']
2376 2382
2377 2383 return self._existsinparent(path)
2378 2384
2379 2385 def lexists(self, path):
2380 2386 """lexists returns True if the path exists"""
2381 2387 if self.isdirty(path):
2382 2388 return self._cache[path][b'exists']
2383 2389
2384 2390 return self._existsinparent(path)
2385 2391
2386 2392 def size(self, path):
2387 2393 if self.isdirty(path):
2388 2394 if self._cache[path][b'exists']:
2389 2395 return len(self._cache[path][b'data'])
2390 2396 else:
2391 2397 raise error.ProgrammingError(
2392 2398 b"No such file or directory: %s" % path
2393 2399 )
2394 2400 return self._wrappedctx[path].size()
2395 2401
2396 2402 def tomemctx(
2397 2403 self,
2398 2404 text,
2399 2405 branch=None,
2400 2406 extra=None,
2401 2407 date=None,
2402 2408 parents=None,
2403 2409 user=None,
2404 2410 editor=None,
2405 2411 ):
2406 2412 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2407 2413 committed.
2408 2414
2409 2415 ``text`` is the commit message.
2410 2416 ``parents`` (optional) are rev numbers.
2411 2417 """
2412 2418 # Default parents to the wrapped contexts' if not passed.
2413 2419 if parents is None:
2414 2420 parents = self._wrappedctx.parents()
2415 2421 if len(parents) == 1:
2416 2422 parents = (parents[0], None)
2417 2423
2418 2424 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2419 2425 if parents[1] is None:
2420 2426 parents = (self._repo[parents[0]], None)
2421 2427 else:
2422 2428 parents = (self._repo[parents[0]], self._repo[parents[1]])
2423 2429
2424 2430 files = self.files()
2425 2431
2426 2432 def getfile(repo, memctx, path):
2427 2433 if self._cache[path][b'exists']:
2428 2434 return memfilectx(
2429 2435 repo,
2430 2436 memctx,
2431 2437 path,
2432 2438 self._cache[path][b'data'],
2433 2439 b'l' in self._cache[path][b'flags'],
2434 2440 b'x' in self._cache[path][b'flags'],
2435 2441 self._cache[path][b'copied'],
2436 2442 )
2437 2443 else:
2438 2444 # Returning None, but including the path in `files`, is
2439 2445 # necessary for memctx to register a deletion.
2440 2446 return None
2441 2447
2442 2448 return memctx(
2443 2449 self._repo,
2444 2450 parents,
2445 2451 text,
2446 2452 files,
2447 2453 getfile,
2448 2454 date=date,
2449 2455 extra=extra,
2450 2456 user=user,
2451 2457 branch=branch,
2452 2458 editor=editor,
2453 2459 )
2454 2460
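# Hedged usage sketch (``wctx`` is an overlayworkingctx whose base was
# set with setbase(); all other names are illustrative):
#
#   >>> mctx = wctx.tomemctx(b'collapsed commit', user=b'someone')
#   >>> node = mctx.commit()   # memctx.commit() calls repo.commitctx()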
2455 2461 def isdirty(self, path):
2456 2462 return path in self._cache
2457 2463
2458 2464 def isempty(self):
2459 2465 # We need to discard any keys that are actually clean before the empty
2460 2466 # commit check.
2461 2467 self._compact()
2462 2468 return len(self._cache) == 0
2463 2469
2464 2470 def clean(self):
2465 2471 self._cache = {}
2466 2472
2467 2473 def _compact(self):
2468 2474 """Removes keys from the cache that are actually clean, by comparing
2469 2475 them with the underlying context.
2470 2476
2471 2477 This can occur during the merge process, e.g. by passing --tool :local
2472 2478 to resolve a conflict.
2473 2479 """
2474 2480 keys = []
2475 2481 # This won't be perfect, but can help performance significantly when
2476 2482 # using things like remotefilelog.
2477 2483 scmutil.prefetchfiles(
2478 2484 self.repo(),
2479 2485 [self.p1().rev()],
2480 2486 scmutil.matchfiles(self.repo(), self._cache.keys()),
2481 2487 )
2482 2488
2483 2489 for path in self._cache.keys():
2484 2490 cache = self._cache[path]
2485 2491 try:
2486 2492 underlying = self._wrappedctx[path]
2487 2493 if (
2488 2494 underlying.data() == cache[b'data']
2489 2495 and underlying.flags() == cache[b'flags']
2490 2496 ):
2491 2497 keys.append(path)
2492 2498 except error.ManifestLookupError:
2493 2499 # Path not in the underlying manifest (created).
2494 2500 continue
2495 2501
2496 2502 for path in keys:
2497 2503 del self._cache[path]
2498 2504 return keys
2499 2505
2500 2506 def _markdirty(
2501 2507 self, path, exists, data=None, date=None, flags=b'', copied=None
2502 2508 ):
2503 2509 # data not provided, let's see if we already have some; if not, let's
2504 2510 # grab it from our underlying context, so that we always have data if
2505 2511 # the file is marked as existing.
2506 2512 if exists and data is None:
2507 2513 oldentry = self._cache.get(path) or {}
2508 2514 data = oldentry.get(b'data')
2509 2515 if data is None:
2510 2516 data = self._wrappedctx[path].data()
2511 2517
2512 2518 self._cache[path] = {
2513 2519 b'exists': exists,
2514 2520 b'data': data,
2515 2521 b'date': date,
2516 2522 b'flags': flags,
2517 2523 b'copied': copied,
2518 2524 }
2519 2525
2520 2526 def filectx(self, path, filelog=None):
2521 2527 return overlayworkingfilectx(
2522 2528 self._repo, path, parent=self, filelog=filelog
2523 2529 )
2524 2530
2525 2531
2526 2532 class overlayworkingfilectx(committablefilectx):
2527 2533 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2528 2534 cache, which can be flushed through later by calling ``flush()``."""
2529 2535
2530 2536 def __init__(self, repo, path, filelog=None, parent=None):
2531 2537 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2532 2538 self._repo = repo
2533 2539 self._parent = parent
2534 2540 self._path = path
2535 2541
2536 2542 def cmp(self, fctx):
2537 2543 return self.data() != fctx.data()
2538 2544
2539 2545 def changectx(self):
2540 2546 return self._parent
2541 2547
2542 2548 def data(self):
2543 2549 return self._parent.data(self._path)
2544 2550
2545 2551 def date(self):
2546 2552 return self._parent.filedate(self._path)
2547 2553
2548 2554 def exists(self):
2549 2555 return self.lexists()
2550 2556
2551 2557 def lexists(self):
2552 2558 return self._parent.exists(self._path)
2553 2559
2554 2560 def copysource(self):
2555 2561 return self._parent.copydata(self._path)
2556 2562
2557 2563 def size(self):
2558 2564 return self._parent.size(self._path)
2559 2565
2560 2566 def markcopied(self, origin):
2561 2567 self._parent.markcopied(self._path, origin)
2562 2568
2563 2569 def audit(self):
2564 2570 pass
2565 2571
2566 2572 def flags(self):
2567 2573 return self._parent.flags(self._path)
2568 2574
2569 2575 def setflags(self, islink, isexec):
2570 2576 return self._parent.setflags(self._path, islink, isexec)
2571 2577
2572 2578 def write(self, data, flags, backgroundclose=False, **kwargs):
2573 2579 return self._parent.write(self._path, data, flags, **kwargs)
2574 2580
2575 2581 def remove(self, ignoremissing=False):
2576 2582 return self._parent.remove(self._path)
2577 2583
2578 2584 def clearunknown(self):
2579 2585 pass
2580 2586
2581 2587
2582 2588 class workingcommitctx(workingctx):
2583 2589 """A workingcommitctx object makes access to data related to
2584 2590 the revision being committed convenient.
2585 2591
2586 2592 This hides changes in the working directory if they aren't
2587 2593 committed in this context.
2588 2594 """
2589 2595
2590 2596 def __init__(
2591 2597 self, repo, changes, text=b"", user=None, date=None, extra=None
2592 2598 ):
2593 2599 super(workingcommitctx, self).__init__(
2594 2600 repo, text, user, date, extra, changes
2595 2601 )
2596 2602
2597 2603 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2598 2604 """Return matched files only in ``self._status``
2599 2605
2600 2606 Uncommitted files appear "clean" via this context, even if
2601 2607 they aren't actually so in the working directory.
2602 2608 """
2603 2609 if clean:
2604 2610 clean = [f for f in self._manifest if f not in self._changedset]
2605 2611 else:
2606 2612 clean = []
2607 2613 return scmutil.status(
2608 2614 [f for f in self._status.modified if match(f)],
2609 2615 [f for f in self._status.added if match(f)],
2610 2616 [f for f in self._status.removed if match(f)],
2611 2617 [],
2612 2618 [],
2613 2619 [],
2614 2620 clean,
2615 2621 )
2616 2622
2617 2623 @propertycache
2618 2624 def _changedset(self):
2619 2625 """Return the set of files changed in this context
2620 2626 """
2621 2627 changed = set(self._status.modified)
2622 2628 changed.update(self._status.added)
2623 2629 changed.update(self._status.removed)
2624 2630 return changed
2625 2631
2626 2632
2627 2633 def makecachingfilectxfn(func):
2628 2634 """Create a filectxfn that caches based on the path.
2629 2635
2630 2636 We can't use util.cachefunc because it uses all arguments as the cache
2631 2637 key and this creates a cycle since the arguments include the repo and
2632 2638 memctx.
2633 2639 """
2634 2640 cache = {}
2635 2641
2636 2642 def getfilectx(repo, memctx, path):
2637 2643 if path not in cache:
2638 2644 cache[path] = func(repo, memctx, path)
2639 2645 return cache[path]
2640 2646
2641 2647 return getfilectx
2642 2648
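# Mechanically: util.cachefunc would key its cache on (repo, memctx,
# path); that cache keeps the memctx alive, and the memctx holds this
# very function through _filectxfn, forming a reference cycle. Keying
# on ``path`` alone, as above, avoids the cycle.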
2643 2649
2644 2650 def memfilefromctx(ctx):
2645 2651 """Given a context return a memfilectx for ctx[path]
2646 2652
2647 2653 This is a convenience method for building a memctx based on another
2648 2654 context.
2649 2655 """
2650 2656
2651 2657 def getfilectx(repo, memctx, path):
2652 2658 fctx = ctx[path]
2653 2659 copysource = fctx.copysource()
2654 2660 return memfilectx(
2655 2661 repo,
2656 2662 memctx,
2657 2663 path,
2658 2664 fctx.data(),
2659 2665 islink=fctx.islink(),
2660 2666 isexec=fctx.isexec(),
2661 2667 copysource=copysource,
2662 2668 )
2663 2669
2664 2670 return getfilectx
2665 2671
2666 2672
2667 2673 def memfilefrompatch(patchstore):
2668 2674 """Given a patch (e.g. patchstore object) return a memfilectx
2669 2675
2670 2676 This is a convenience method for building a memctx based on a patchstore.
2671 2677 """
2672 2678
2673 2679 def getfilectx(repo, memctx, path):
2674 2680 data, mode, copysource = patchstore.getfile(path)
2675 2681 if data is None:
2676 2682 return None
2677 2683 islink, isexec = mode
2678 2684 return memfilectx(
2679 2685 repo,
2680 2686 memctx,
2681 2687 path,
2682 2688 data,
2683 2689 islink=islink,
2684 2690 isexec=isexec,
2685 2691 copysource=copysource,
2686 2692 )
2687 2693
2688 2694 return getfilectx
2689 2695
2690 2696
2691 2697 class memctx(committablectx):
2692 2698 """Use memctx to perform in-memory commits via localrepo.commitctx().
2693 2699
2694 2700 Revision information is supplied at initialization time, while
2695 2701 related file data is made available through a callback
2696 2702 mechanism. 'repo' is the current localrepo, 'parents' is a
2697 2703 sequence of two parent revision identifiers (pass None for every
2698 2704 missing parent), 'text' is the commit message and 'files' lists
2699 2705 names of files touched by the revision (normalized and relative to
2700 2706 repository root).
2701 2707
2702 2708 filectxfn(repo, memctx, path) is a callable receiving the
2703 2709 repository, the current memctx object and the normalized path of
2704 2710 requested file, relative to repository root. It is fired by the
2705 2711 commit function for every file in 'files', but calls order is
2706 2712 undefined. If the file is available in the revision being
2707 2713 committed (updated or added), filectxfn returns a memfilectx
2708 2714 object. If the file was removed, filectxfn returns None for recent
2709 2715 Mercurial. Moved files are represented by marking the source file
2710 2716 removed and the new file added with copy information (see
2711 2717 memfilectx).
2712 2718
2713 2719 user receives the committer name and defaults to current
2714 2720 repository username, date is the commit date in any format
2715 2721 supported by dateutil.parsedate() and defaults to current date, extra
2716 2722 is a dictionary of metadata or is left empty.
2717 2723 """
2718 2724
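# Hedged usage sketch of an in-memory commit (``repo`` is an open
# localrepo; file name and content are illustrative):
#
#   >>> def getfilectx(repo, memctx, path):
#   ...     return memfilectx(repo, memctx, path, b'new content\n')
#   >>> ctx = memctx(repo, (repo[b'.'].node(), None), b'commit message',
#   ...              [b'a.txt'], getfilectx, user=b'someone')
#   >>> node = ctx.commit()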
2719 2725 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2720 2726 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2721 2727 # this field to determine what to do in filectxfn.
2722 2728 _returnnoneformissingfiles = True
2723 2729
2724 2730 def __init__(
2725 2731 self,
2726 2732 repo,
2727 2733 parents,
2728 2734 text,
2729 2735 files,
2730 2736 filectxfn,
2731 2737 user=None,
2732 2738 date=None,
2733 2739 extra=None,
2734 2740 branch=None,
2735 2741 editor=False,
2736 2742 ):
2737 2743 super(memctx, self).__init__(
2738 2744 repo, text, user, date, extra, branch=branch
2739 2745 )
2740 2746 self._rev = None
2741 2747 self._node = None
2742 2748 parents = [(p or nullid) for p in parents]
2743 2749 p1, p2 = parents
2744 2750 self._parents = [self._repo[p] for p in (p1, p2)]
2745 2751 files = sorted(set(files))
2746 2752 self._files = files
2747 2753 self.substate = {}
2748 2754
2749 2755 if isinstance(filectxfn, patch.filestore):
2750 2756 filectxfn = memfilefrompatch(filectxfn)
2751 2757 elif not callable(filectxfn):
2752 2758 # if store is not callable, wrap it in a function
2753 2759 filectxfn = memfilefromctx(filectxfn)
2754 2760
2755 2761 # memoizing increases performance for e.g. vcs convert scenarios.
2756 2762 self._filectxfn = makecachingfilectxfn(filectxfn)
2757 2763
2758 2764 if editor:
2759 2765 self._text = editor(self._repo, self, [])
2760 2766 self._repo.savecommitmessage(self._text)
2761 2767
2762 2768 def filectx(self, path, filelog=None):
2763 2769 """get a file context from the working directory
2764 2770
2765 2771 Returns None if file doesn't exist and should be removed."""
2766 2772 return self._filectxfn(self._repo, self, path)
2767 2773
2768 2774 def commit(self):
2769 2775 """commit context to the repo"""
2770 2776 return self._repo.commitctx(self)
2771 2777
2772 2778 @propertycache
2773 2779 def _manifest(self):
2774 2780 """generate a manifest based on the return values of filectxfn"""
2775 2781
2776 2782 # keep this simple for now; just worry about p1
2777 2783 pctx = self._parents[0]
2778 2784 man = pctx.manifest().copy()
2779 2785
2780 2786 for f in self._status.modified:
2781 2787 man[f] = modifiednodeid
2782 2788
2783 2789 for f in self._status.added:
2784 2790 man[f] = addednodeid
2785 2791
2786 2792 for f in self._status.removed:
2787 2793 if f in man:
2788 2794 del man[f]
2789 2795
2790 2796 return man
2791 2797
2792 2798 @propertycache
2793 2799 def _status(self):
2794 2800 """Calculate exact status from ``files`` specified at construction
2795 2801 """
2796 2802 man1 = self.p1().manifest()
2797 2803 p2 = self._parents[1]
2798 2804 # "1 < len(self._parents)" can't be used for checking
2799 2805 # existence of the 2nd parent, because "memctx._parents" is
2800 2806 # explicitly initialized as a list whose length is always 2.
2801 2807 if p2.node() != nullid:
2802 2808 man2 = p2.manifest()
2803 2809 managing = lambda f: f in man1 or f in man2
2804 2810 else:
2805 2811 managing = lambda f: f in man1
2806 2812
2807 2813 modified, added, removed = [], [], []
2808 2814 for f in self._files:
2809 2815 if not managing(f):
2810 2816 added.append(f)
2811 2817 elif self[f]:
2812 2818 modified.append(f)
2813 2819 else:
2814 2820 removed.append(f)
2815 2821
2816 2822 return scmutil.status(modified, added, removed, [], [], [], [])
2817 2823
2818 2824
2819 2825 class memfilectx(committablefilectx):
2820 2826 """memfilectx represents an in-memory file to commit.
2821 2827
2822 2828 See memctx and committablefilectx for more details.
2823 2829 """
2824 2830
2825 2831 def __init__(
2826 2832 self,
2827 2833 repo,
2828 2834 changectx,
2829 2835 path,
2830 2836 data,
2831 2837 islink=False,
2832 2838 isexec=False,
2833 2839 copysource=None,
2834 2840 ):
2835 2841 """
2836 2842 path is the normalized file path relative to repository root.
2837 2843 data is the file content as a string.
2838 2844 islink is True if the file is a symbolic link.
2839 2845 isexec is True if the file is executable.
2840 2846 copysource is the source file path if the current file was copied in
2841 2847 the revision being committed, or None."""
2842 2848 super(memfilectx, self).__init__(repo, path, None, changectx)
2843 2849 self._data = data
2844 2850 if islink:
2845 2851 self._flags = b'l'
2846 2852 elif isexec:
2847 2853 self._flags = b'x'
2848 2854 else:
2849 2855 self._flags = b''
2850 2856 self._copysource = copysource
2851 2857
2852 2858 def copysource(self):
2853 2859 return self._copysource
2854 2860
2855 2861 def cmp(self, fctx):
2856 2862 return self.data() != fctx.data()
2857 2863
2858 2864 def data(self):
2859 2865 return self._data
2860 2866
2861 2867 def remove(self, ignoremissing=False):
2862 2868 """wraps unlink for a repo's working directory"""
2863 2869 # need to figure out what to do here
2864 2870 del self._changectx[self._path]
2865 2871
2866 2872 def write(self, data, flags, **kwargs):
2867 2873 """wraps repo.wwrite"""
2868 2874 self._data = data
2869 2875
2870 2876
2871 2877 class metadataonlyctx(committablectx):
2872 2878 """Like memctx but reusing the manifest of a different commit.
2873 2879 Intended to be used by lightweight operations that are creating
2874 2880 metadata-only changes.
2875 2881
2876 2882 Revision information is supplied at initialization time. 'repo' is the
2877 2883 current localrepo, 'originalctx' is the original revision whose manifest
2878 2884 we're reusing, 'parents' is a sequence of two parent revision identifiers
2879 2885 (pass None for every missing parent), 'text' is the commit message.
2880 2886
2881 2887 user receives the committer name and defaults to current repository
2882 2888 username, date is the commit date in any format supported by
2883 2889 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2884 2890 metadata or is left empty.
2885 2891 """
2886 2892
2887 2893 def __init__(
2888 2894 self,
2889 2895 repo,
2890 2896 originalctx,
2891 2897 parents=None,
2892 2898 text=None,
2893 2899 user=None,
2894 2900 date=None,
2895 2901 extra=None,
2896 2902 editor=False,
2897 2903 ):
2898 2904 if text is None:
2899 2905 text = originalctx.description()
2900 2906 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2901 2907 self._rev = None
2902 2908 self._node = None
2903 2909 self._originalctx = originalctx
2904 2910 self._manifestnode = originalctx.manifestnode()
2905 2911 if parents is None:
2906 2912 parents = originalctx.parents()
2907 2913 else:
2908 2914 parents = [repo[p] for p in parents if p is not None]
2909 2915 parents = parents[:]
2910 2916 while len(parents) < 2:
2911 2917 parents.append(repo[nullid])
2912 2918 p1, p2 = self._parents = parents
2913 2919
2914 2920 # sanity check to ensure that the reused manifest parents are
2915 2921 # manifests of our commit parents
2916 2922 mp1, mp2 = self.manifestctx().parents
2917 2923 if p1 != nullid and p1.manifestnode() != mp1:
2918 2924 raise RuntimeError(
2919 2925 r"can't reuse the manifest: its p1 "
2920 2926 r"doesn't match the new ctx p1"
2921 2927 )
2922 2928 if p2 != nullid and p2.manifestnode() != mp2:
2923 2929 raise RuntimeError(
2924 2930 r"can't reuse the manifest: "
2925 2931 r"its p2 doesn't match the new ctx p2"
2926 2932 )
2927 2933
2928 2934 self._files = originalctx.files()
2929 2935 self.substate = {}
2930 2936
2931 2937 if editor:
2932 2938 self._text = editor(self._repo, self, [])
2933 2939 self._repo.savecommitmessage(self._text)
2934 2940
2935 2941 def manifestnode(self):
2936 2942 return self._manifestnode
2937 2943
2938 2944 @property
2939 2945 def _manifestctx(self):
2940 2946 return self._repo.manifestlog[self._manifestnode]
2941 2947
2942 2948 def filectx(self, path, filelog=None):
2943 2949 return self._originalctx.filectx(path, filelog=filelog)
2944 2950
2945 2951 def commit(self):
2946 2952 """commit context to the repo"""
2947 2953 return self._repo.commitctx(self)
2948 2954
2949 2955 @property
2950 2956 def _manifest(self):
2951 2957 return self._originalctx.manifest()
2952 2958
2953 2959 @propertycache
2954 2960 def _status(self):
2955 2961 """Calculate exact status from ``files`` specified in the ``originalctx``
2956 2962 and parents manifests.
2957 2963 """
2958 2964 man1 = self.p1().manifest()
2959 2965 p2 = self._parents[1]
2960 2966 # "1 < len(self._parents)" can't be used for checking
2961 2967 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2962 2968 # explicitly initialized as a list whose length is always 2.
2963 2969 if p2.node() != nullid:
2964 2970 man2 = p2.manifest()
2965 2971 managing = lambda f: f in man1 or f in man2
2966 2972 else:
2967 2973 managing = lambda f: f in man1
2968 2974
2969 2975 modified, added, removed = [], [], []
2970 2976 for f in self._files:
2971 2977 if not managing(f):
2972 2978 added.append(f)
2973 2979 elif f in self:
2974 2980 modified.append(f)
2975 2981 else:
2976 2982 removed.append(f)
2977 2983
2978 2984 return scmutil.status(modified, added, removed, [], [], [], [])
2979 2985
2980 2986
2981 2987 class arbitraryfilectx(object):
2982 2988 """Allows you to use filectx-like functions on a file in an arbitrary
2983 2989 location on disk, possibly not in the working directory.
2984 2990 """
2985 2991
2986 2992 def __init__(self, path, repo=None):
2987 2993 # Repo is optional because contrib/simplemerge uses this class.
2988 2994 self._repo = repo
2989 2995 self._path = path
2990 2996
2991 2997 def cmp(self, fctx):
2992 2998 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2993 2999 # path if either side is a symlink.
2994 3000 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2995 3001 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2996 3002 # Add a fast-path for merge if both sides are disk-backed.
2997 3003 # Note that filecmp uses the opposite return values (True if same)
2998 3004 # from our cmp functions (True if different).
2999 3005 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
3000 3006 return self.data() != fctx.data()
3001 3007
3002 3008 def path(self):
3003 3009 return self._path
3004 3010
3005 3011 def flags(self):
3006 3012 return b''
3007 3013
3008 3014 def data(self):
3009 3015 return util.readfile(self._path)
3010 3016
3011 3017 def decodeddata(self):
3012 3018 with open(self._path, b"rb") as f:
3013 3019 return f.read()
3014 3020
3015 3021 def remove(self):
3016 3022 util.unlink(self._path)
3017 3023
3018 3024 def write(self, data, flags, **kwargs):
3019 3025 assert not flags
3020 3026 with open(self._path, b"wb") as f:
3021 3027 f.write(data)
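# Hedged usage sketch (paths are illustrative):
#
#   >>> left = arbitraryfilectx(b'/tmp/left')    # repo is optional
#   >>> right = arbitraryfilectx(b'/tmp/right')
#   >>> left.cmp(right)    # True if the contents differ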
@@ -1,4285 +1,4285 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import codecs
11 11 import collections
12 12 import difflib
13 13 import errno
14 14 import operator
15 15 import os
16 16 import random
17 17 import re
18 18 import socket
19 19 import ssl
20 20 import stat
21 21 import string
22 22 import subprocess
23 23 import sys
24 24 import time
25 25
26 26 from .i18n import _
27 27 from .node import (
28 28 bin,
29 29 hex,
30 30 nullhex,
31 31 nullid,
32 32 nullrev,
33 33 short,
34 34 )
35 35 from .pycompat import (
36 36 getattr,
37 37 open,
38 38 )
39 39 from . import (
40 40 bundle2,
41 41 changegroup,
42 42 cmdutil,
43 43 color,
44 44 context,
45 45 copies,
46 46 dagparser,
47 47 encoding,
48 48 error,
49 49 exchange,
50 50 extensions,
51 51 filemerge,
52 52 filesetlang,
53 53 formatter,
54 54 hg,
55 55 httppeer,
56 56 localrepo,
57 57 lock as lockmod,
58 58 logcmdutil,
59 59 merge as mergemod,
60 60 obsolete,
61 61 obsutil,
62 62 pathutil,
63 63 phases,
64 64 policy,
65 65 pvec,
66 66 pycompat,
67 67 registrar,
68 68 repair,
69 69 revlog,
70 70 revset,
71 71 revsetlang,
72 72 scmutil,
73 73 setdiscovery,
74 74 simplemerge,
75 75 sshpeer,
76 76 sslutil,
77 77 streamclone,
78 78 templater,
79 79 treediscovery,
80 80 upgrade,
81 81 url as urlmod,
82 82 util,
83 83 vfs as vfsmod,
84 84 wireprotoframing,
85 85 wireprotoserver,
86 86 wireprotov2peer,
87 87 )
88 88 from .utils import (
89 89 cborutil,
90 90 compression,
91 91 dateutil,
92 92 procutil,
93 93 stringutil,
94 94 )
95 95
96 96 from .revlogutils import deltas as deltautil
97 97
98 98 release = lockmod.release
99 99
100 100 command = registrar.command()
101 101
102 102
103 103 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
104 104 def debugancestor(ui, repo, *args):
105 105 """find the ancestor revision of two revisions in a given index"""
106 106 if len(args) == 3:
107 107 index, rev1, rev2 = args
108 108 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
109 109 lookup = r.lookup
110 110 elif len(args) == 2:
111 111 if not repo:
112 112 raise error.Abort(
113 113 _(b'there is no Mercurial repository here (.hg not found)')
114 114 )
115 115 rev1, rev2 = args
116 116 r = repo.changelog
117 117 lookup = repo.lookup
118 118 else:
119 119 raise error.Abort(_(b'either two or three arguments required'))
120 120 a = r.ancestor(lookup(rev1), lookup(rev2))
121 121 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
122 122
123 123
124 124 @command(b'debugapplystreamclonebundle', [], b'FILE')
125 125 def debugapplystreamclonebundle(ui, repo, fname):
126 126 """apply a stream clone bundle file"""
127 127 f = hg.openpath(ui, fname)
128 128 gen = exchange.readbundle(ui, f, fname)
129 129 gen.apply(repo)
130 130
131 131
132 132 @command(
133 133 b'debugbuilddag',
134 134 [
135 135 (
136 136 b'm',
137 137 b'mergeable-file',
138 138 None,
139 139 _(b'add single file mergeable changes'),
140 140 ),
141 141 (
142 142 b'o',
143 143 b'overwritten-file',
144 144 None,
145 145 _(b'add single file all revs overwrite'),
146 146 ),
147 147 (b'n', b'new-file', None, _(b'add new file at each rev')),
148 148 ],
149 149 _(b'[OPTION]... [TEXT]'),
150 150 )
151 151 def debugbuilddag(
152 152 ui,
153 153 repo,
154 154 text=None,
155 155 mergeable_file=False,
156 156 overwritten_file=False,
157 157 new_file=False,
158 158 ):
159 159 """builds a repo with a given DAG from scratch in the current empty repo
160 160
161 161 The description of the DAG is read from stdin if not given on the
162 162 command line.
163 163
164 164 Elements:
165 165
166 166 - "+n" is a linear run of n nodes based on the current default parent
167 167 - "." is a single node based on the current default parent
168 168 - "$" resets the default parent to null (implied at the start);
169 169 otherwise the default parent is always the last node created
170 170 - "<p" sets the default parent to the backref p
171 171 - "*p" is a fork at parent p, which is a backref
172 172 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
173 173 - "/p2" is a merge of the preceding node and p2
174 174 - ":tag" defines a local tag for the preceding node
175 175 - "@branch" sets the named branch for subsequent nodes
176 176 - "#...\\n" is a comment up to the end of the line
177 177
178 178 Whitespace between the above elements is ignored.
179 179
180 180 A backref is either
181 181
182 182 - a number n, which references the node curr-n, where curr is the current
183 183 node, or
184 184 - the name of a local tag you placed earlier using ":tag", or
185 185 - empty to denote the default parent.
186 186
187 187 All string-valued elements are either strictly alphanumeric, or must
188 188 be enclosed in double quotes ("..."), with "\\" as escape character.
189 189 """
190 190
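# For instance (an illustrative DAG, not taken from the test suite):
#
#   hg debugbuilddag '+3 :mainline <1 +2 /mainline'
#
# builds three linear nodes (tagging the third "mainline"), resets the
# default parent to the second node (backref 1), adds a two-node
# branch, and finally merges the branch head with "mainline".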
191 191 if text is None:
192 192 ui.status(_(b"reading DAG from stdin\n"))
193 193 text = ui.fin.read()
194 194
195 195 cl = repo.changelog
196 196 if len(cl) > 0:
197 197 raise error.Abort(_(b'repository is not empty'))
198 198
199 199 # determine number of revs in DAG
200 200 total = 0
201 201 for type, data in dagparser.parsedag(text):
202 202 if type == b'n':
203 203 total += 1
204 204
205 205 if mergeable_file:
206 206 linesperrev = 2
207 207 # make a file with k lines per rev
208 208 initialmergedlines = [
209 209 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
210 210 ]
211 211 initialmergedlines.append(b"")
212 212
213 213 tags = []
214 214 progress = ui.makeprogress(
215 215 _(b'building'), unit=_(b'revisions'), total=total
216 216 )
217 217 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
218 218 at = -1
219 219 atbranch = b'default'
220 220 nodeids = []
221 221 id = 0
222 222 progress.update(id)
223 223 for type, data in dagparser.parsedag(text):
224 224 if type == b'n':
225 225 ui.note((b'node %s\n' % pycompat.bytestr(data)))
226 226 id, ps = data
227 227
228 228 files = []
229 229 filecontent = {}
230 230
231 231 p2 = None
232 232 if mergeable_file:
233 233 fn = b"mf"
234 234 p1 = repo[ps[0]]
235 235 if len(ps) > 1:
236 236 p2 = repo[ps[1]]
237 237 pa = p1.ancestor(p2)
238 238 base, local, other = [
239 239 x[fn].data() for x in (pa, p1, p2)
240 240 ]
241 241 m3 = simplemerge.Merge3Text(base, local, other)
242 242 ml = [l.strip() for l in m3.merge_lines()]
243 243 ml.append(b"")
244 244 elif at > 0:
245 245 ml = p1[fn].data().split(b"\n")
246 246 else:
247 247 ml = initialmergedlines
248 248 ml[id * linesperrev] += b" r%i" % id
249 249 mergedtext = b"\n".join(ml)
250 250 files.append(fn)
251 251 filecontent[fn] = mergedtext
252 252
253 253 if overwritten_file:
254 254 fn = b"of"
255 255 files.append(fn)
256 256 filecontent[fn] = b"r%i\n" % id
257 257
258 258 if new_file:
259 259 fn = b"nf%i" % id
260 260 files.append(fn)
261 261 filecontent[fn] = b"r%i\n" % id
262 262 if len(ps) > 1:
263 263 if not p2:
264 264 p2 = repo[ps[1]]
265 265 for fn in p2:
266 266 if fn.startswith(b"nf"):
267 267 files.append(fn)
268 268 filecontent[fn] = p2[fn].data()
269 269
270 270 def fctxfn(repo, cx, path):
271 271 if path in filecontent:
272 272 return context.memfilectx(
273 273 repo, cx, path, filecontent[path]
274 274 )
275 275 return None
276 276
277 277 if len(ps) == 0 or ps[0] < 0:
278 278 pars = [None, None]
279 279 elif len(ps) == 1:
280 280 pars = [nodeids[ps[0]], None]
281 281 else:
282 282 pars = [nodeids[p] for p in ps]
283 283 cx = context.memctx(
284 284 repo,
285 285 pars,
286 286 b"r%i" % id,
287 287 files,
288 288 fctxfn,
289 289 date=(id, 0),
290 290 user=b"debugbuilddag",
291 291 extra={b'branch': atbranch},
292 292 )
293 293 nodeid = repo.commitctx(cx)
294 294 nodeids.append(nodeid)
295 295 at = id
296 296 elif type == b'l':
297 297 id, name = data
298 298 ui.note((b'tag %s\n' % name))
299 299 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
300 300 elif type == b'a':
301 301 ui.note((b'branch %s\n' % data))
302 302 atbranch = data
303 303 progress.update(id)
304 304
305 305 if tags:
306 306 repo.vfs.write(b"localtags", b"".join(tags))
307 307
308 308
309 309 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
310 310 indent_string = b' ' * indent
311 311 if all:
312 312 ui.writenoi18n(
313 313 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
314 314 % indent_string
315 315 )
316 316
317 317 def showchunks(named):
318 318 ui.write(b"\n%s%s\n" % (indent_string, named))
319 319 for deltadata in gen.deltaiter():
320 320 node, p1, p2, cs, deltabase, delta, flags = deltadata
321 321 ui.write(
322 322 b"%s%s %s %s %s %s %d\n"
323 323 % (
324 324 indent_string,
325 325 hex(node),
326 326 hex(p1),
327 327 hex(p2),
328 328 hex(cs),
329 329 hex(deltabase),
330 330 len(delta),
331 331 )
332 332 )
333 333
334 334 gen.changelogheader()
335 335 showchunks(b"changelog")
336 336 gen.manifestheader()
337 337 showchunks(b"manifest")
338 338 for chunkdata in iter(gen.filelogheader, {}):
339 339 fname = chunkdata[b'filename']
340 340 showchunks(fname)
341 341 else:
342 342 if isinstance(gen, bundle2.unbundle20):
343 343 raise error.Abort(_(b'use debugbundle2 for this file'))
344 344 gen.changelogheader()
345 345 for deltadata in gen.deltaiter():
346 346 node, p1, p2, cs, deltabase, delta, flags = deltadata
347 347 ui.write(b"%s%s\n" % (indent_string, hex(node)))
348 348
349 349
350 350 def _debugobsmarkers(ui, part, indent=0, **opts):
351 351 """display version and markers contained in 'data'"""
352 352 opts = pycompat.byteskwargs(opts)
353 353 data = part.read()
354 354 indent_string = b' ' * indent
355 355 try:
356 356 version, markers = obsolete._readmarkers(data)
357 357 except error.UnknownVersion as exc:
358 358 msg = b"%sunsupported version: %s (%d bytes)\n"
359 359 msg %= indent_string, exc.version, len(data)
360 360 ui.write(msg)
361 361 else:
362 362 msg = b"%sversion: %d (%d bytes)\n"
363 363 msg %= indent_string, version, len(data)
364 364 ui.write(msg)
365 365 fm = ui.formatter(b'debugobsolete', opts)
366 366 for rawmarker in sorted(markers):
367 367 m = obsutil.marker(None, rawmarker)
368 368 fm.startitem()
369 369 fm.plain(indent_string)
370 370 cmdutil.showmarker(fm, m)
371 371 fm.end()
372 372
373 373
374 374 def _debugphaseheads(ui, data, indent=0):
375 375 """display version and markers contained in 'data'"""
376 376 indent_string = b' ' * indent
377 377 headsbyphase = phases.binarydecode(data)
378 378 for phase in phases.allphases:
379 379 for head in headsbyphase[phase]:
380 380 ui.write(indent_string)
381 381 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
382 382
383 383
384 384 def _quasirepr(thing):
385 385 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
386 386 return b'{%s}' % (
387 387 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
388 388 )
389 389 return pycompat.bytestr(repr(thing))
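# Example (illustrative): _quasirepr({b'b': b'2', b'a': b'1'}) yields
# b'{a: 1, b: 2}', giving bundle2 stream/part parameters a stable, sorted
# rendering regardless of dict ordering.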
390 390
391 391
392 392 def _debugbundle2(ui, gen, all=None, **opts):
393 393 """lists the contents of a bundle2"""
394 394 if not isinstance(gen, bundle2.unbundle20):
395 395 raise error.Abort(_(b'not a bundle2 file'))
396 396 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
397 397 parttypes = opts.get('part_type', [])
398 398 for part in gen.iterparts():
399 399 if parttypes and part.type not in parttypes:
400 400 continue
401 401 msg = b'%s -- %s (mandatory: %r)\n'
402 402 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
403 403 if part.type == b'changegroup':
404 404 version = part.params.get(b'version', b'01')
405 405 cg = changegroup.getunbundler(version, part, b'UN')
406 406 if not ui.quiet:
407 407 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
408 408 if part.type == b'obsmarkers':
409 409 if not ui.quiet:
410 410 _debugobsmarkers(ui, part, indent=4, **opts)
411 411 if part.type == b'phase-heads':
412 412 if not ui.quiet:
413 413 _debugphaseheads(ui, part, indent=4)
414 414
415 415
416 416 @command(
417 417 b'debugbundle',
418 418 [
419 419 (b'a', b'all', None, _(b'show all details')),
420 420 (b'', b'part-type', [], _(b'show only the named part type')),
421 421 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
422 422 ],
423 423 _(b'FILE'),
424 424 norepo=True,
425 425 )
426 426 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
427 427 """lists the contents of a bundle"""
428 428 with hg.openpath(ui, bundlepath) as f:
429 429 if spec:
430 430 spec = exchange.getbundlespec(ui, f)
431 431 ui.write(b'%s\n' % spec)
432 432 return
433 433
434 434 gen = exchange.readbundle(ui, f, bundlepath)
435 435 if isinstance(gen, bundle2.unbundle20):
436 436 return _debugbundle2(ui, gen, all=all, **opts)
437 437 _debugchangegroup(ui, gen, all=all, **opts)
438 438
439 439
440 440 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
441 441 def debugcapabilities(ui, path, **opts):
442 442 """lists the capabilities of a remote peer"""
443 443 opts = pycompat.byteskwargs(opts)
444 444 peer = hg.peer(ui, opts, path)
445 445 caps = peer.capabilities()
446 446 ui.writenoi18n(b'Main capabilities:\n')
447 447 for c in sorted(caps):
448 448 ui.write(b' %s\n' % c)
449 449 b2caps = bundle2.bundle2caps(peer)
450 450 if b2caps:
451 451 ui.writenoi18n(b'Bundle2 capabilities:\n')
452 452 for key, values in sorted(pycompat.iteritems(b2caps)):
453 453 ui.write(b' %s\n' % key)
454 454 for v in values:
455 455 ui.write(b' %s\n' % v)
456 456
457 457
458 458 @command(b'debugcheckstate', [], b'')
459 459 def debugcheckstate(ui, repo):
460 460 """validate the correctness of the current dirstate"""
461 461 parent1, parent2 = repo.dirstate.parents()
462 462 m1 = repo[parent1].manifest()
463 463 m2 = repo[parent2].manifest()
464 464 errors = 0
465 465 for f in repo.dirstate:
466 466 state = repo.dirstate[f]
467 467 if state in b"nr" and f not in m1:
468 468 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
469 469 errors += 1
470 470 if state in b"a" and f in m1:
471 471 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
472 472 errors += 1
473 473 if state in b"m" and f not in m1 and f not in m2:
474 474 ui.warn(
475 475 _(b"%s in state %s, but not in either manifest\n") % (f, state)
476 476 )
477 477 errors += 1
478 478 for f in m1:
479 479 state = repo.dirstate[f]
480 480 if state not in b"nrm":
481 481 ui.warn(_(b"%s in manifest1, but listed as state %s\n") % (f, state))
482 482 errors += 1
483 483 if errors:
484 484 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
485 485 raise error.Abort(errstr)
486 486
487 487
488 488 @command(
489 489 b'debugcolor',
490 490 [(b'', b'style', None, _(b'show all configured styles'))],
491 491 b'hg debugcolor',
492 492 )
493 493 def debugcolor(ui, repo, **opts):
494 494 """show available color, effects or style"""
495 495 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
496 496 if opts.get('style'):
497 497 return _debugdisplaystyle(ui)
498 498 else:
499 499 return _debugdisplaycolor(ui)
500 500
501 501
502 502 def _debugdisplaycolor(ui):
503 503 ui = ui.copy()
504 504 ui._styles.clear()
505 505 for effect in color._activeeffects(ui).keys():
506 506 ui._styles[effect] = effect
507 507 if ui._terminfoparams:
508 508 for k, v in ui.configitems(b'color'):
509 509 if k.startswith(b'color.'):
510 510 ui._styles[k] = k[6:]
511 511 elif k.startswith(b'terminfo.'):
512 512 ui._styles[k] = k[9:]
513 513 ui.write(_(b'available colors:\n'))
514 514 # sort labels with '_' after the others to group the '_background' entries.
515 515 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
516 516 for colorname, label in items:
517 517 ui.write(b'%s\n' % colorname, label=label)
518 518
519 519
520 520 def _debugdisplaystyle(ui):
521 521 ui.write(_(b'available styles:\n'))
522 522 if not ui._styles:
523 523 return
524 524 width = max(len(s) for s in ui._styles)
525 525 for label, effects in sorted(ui._styles.items()):
526 526 ui.write(b'%s' % label, label=label)
527 527 if effects:
529 529 ui.write(b': ')
530 530 ui.write(b' ' * (max(0, width - len(label))))
531 531 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
532 532 ui.write(b'\n')
533 533
534 534
535 535 @command(b'debugcreatestreamclonebundle', [], b'FILE')
536 536 def debugcreatestreamclonebundle(ui, repo, fname):
537 537 """create a stream clone bundle file
538 538
539 539 Stream bundles are special bundles that are essentially archives of
540 540 revlog files. They are commonly used for cloning very quickly.
541 541 """
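# e.g. (illustrative): "hg debugcreatestreamclonebundle stream.hg" writes a
# stream clone bundle of the current repository to stream.hg.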
542 542 # TODO we may want to turn this into an abort when this functionality
543 543 # is moved into `hg bundle`.
544 544 if phases.hassecret(repo):
545 545 ui.warn(
546 546 _(
547 547 b'(warning: stream clone bundle will contain secret '
548 548 b'revisions)\n'
549 549 )
550 550 )
551 551
552 552 requirements, gen = streamclone.generatebundlev1(repo)
553 553 changegroup.writechunks(ui, gen, fname)
554 554
555 555 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
556 556
557 557
558 558 @command(
559 559 b'debugdag',
560 560 [
561 561 (b't', b'tags', None, _(b'use tags as labels')),
562 562 (b'b', b'branches', None, _(b'annotate with branch names')),
563 563 (b'', b'dots', None, _(b'use dots for runs')),
564 564 (b's', b'spaces', None, _(b'separate elements by spaces')),
565 565 ],
566 566 _(b'[OPTION]... [FILE [REV]...]'),
567 567 optionalrepo=True,
568 568 )
569 569 def debugdag(ui, repo, file_=None, *revs, **opts):
570 570 """format the changelog or an index DAG as a concise textual description
571 571
572 572 If you pass a revlog index, the revlog's DAG is emitted. If you list
573 573 revision numbers, they get labeled in the output as rN.
574 574
575 575 Otherwise, the changelog DAG of the current repo is emitted.
576 576 """
577 577 spaces = opts.get('spaces')
578 578 dots = opts.get('dots')
579 579 if file_:
580 580 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
581 581 revs = set((int(r) for r in revs))
582 582
583 583 def events():
584 584 for r in rlog:
585 585 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
586 586 if r in revs:
587 587 yield b'l', (r, b"r%i" % r)
588 588
589 589 elif repo:
590 590 cl = repo.changelog
591 591 tags = opts.get('tags')
592 592 branches = opts.get('branches')
593 593 if tags:
594 594 labels = {}
595 595 for l, n in repo.tags().items():
596 596 labels.setdefault(cl.rev(n), []).append(l)
597 597
598 598 def events():
599 599 b = b"default"
600 600 for r in cl:
601 601 if branches:
602 602 newb = cl.read(cl.node(r))[5][b'branch']
603 603 if newb != b:
604 604 yield b'a', newb
605 605 b = newb
606 606 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
607 607 if tags:
608 608 ls = labels.get(r)
609 609 if ls:
610 610 for l in ls:
611 611 yield b'l', (r, l)
612 612
613 613 else:
614 614 raise error.Abort(_(b'need repo for changelog dag'))
615 615
616 616 for line in dagparser.dagtextlines(
617 617 events(),
618 618 addspaces=spaces,
619 619 wraplabels=True,
620 620 wrapannotations=True,
621 621 wrapnonlinear=dots,
622 622 usedots=dots,
623 623 maxlinewidth=70,
624 624 ):
625 625 ui.write(line)
626 626 ui.write(b"\n")
627 627
628 628
629 629 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
630 630 def debugdata(ui, repo, file_, rev=None, **opts):
631 631 """dump the contents of a data file revision"""
632 632 opts = pycompat.byteskwargs(opts)
633 633 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
634 634 if rev is not None:
635 635 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
636 636 file_, rev = None, file_
637 637 elif rev is None:
638 638 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
639 639 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
640 640 try:
641 641 ui.write(r.rawdata(r.lookup(rev)))
642 642 except KeyError:
643 643 raise error.Abort(_(b'invalid revision identifier %s') % rev)
644 644
645 645
646 646 @command(
647 647 b'debugdate',
648 648 [(b'e', b'extended', None, _(b'try extended date formats'))],
649 649 _(b'[-e] DATE [RANGE]'),
650 650 norepo=True,
651 651 optionalrepo=True,
652 652 )
653 653 def debugdate(ui, date, range=None, **opts):
654 654 """parse and display a date"""
655 655 if opts["extended"]:
656 656 d = dateutil.parsedate(date, dateutil.extendeddateformats)
657 657 else:
658 658 d = dateutil.parsedate(date)
659 659 ui.writenoi18n(b"internal: %d %d\n" % d)
660 660 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
661 661 if range:
662 662 m = dateutil.matchdate(range)
663 663 ui.writenoi18n(b"match: %s\n" % m(d[0]))
664 664
665 665
666 666 @command(
667 667 b'debugdeltachain',
668 668 cmdutil.debugrevlogopts + cmdutil.formatteropts,
669 669 _(b'-c|-m|FILE'),
670 670 optionalrepo=True,
671 671 )
672 672 def debugdeltachain(ui, repo, file_=None, **opts):
673 673 """dump information about delta chains in a revlog
674 674
675 675 Output can be templatized. Available template keywords are:
676 676
677 677 :``rev``: revision number
678 678 :``chainid``: delta chain identifier (numbered by unique base)
679 679 :``chainlen``: delta chain length to this revision
680 680 :``prevrev``: previous revision in delta chain
681 681 :``deltatype``: role of delta / how it was computed
682 682 :``compsize``: compressed size of revision
683 683 :``uncompsize``: uncompressed size of revision
684 684 :``chainsize``: total size of compressed revisions in chain
685 685 :``chainratio``: total chain size divided by uncompressed revision size
686 686 (new delta chains typically start at ratio 2.00)
687 687 :``lindist``: linear distance from base revision in delta chain to end
688 688 of this revision
689 689 :``extradist``: total size of revisions not part of this delta chain from
690 690 base of delta chain to end of this revision; a measurement
691 691 of how much extra data we need to read/seek across to read
692 692 the delta chain for this revision
693 693 :``extraratio``: extradist divided by chainsize; another representation of
694 694 how much unrelated data is needed to load this delta chain
695 695
696 696 If the repository is configured to use sparse reads, additional keywords
697 697 are available:
698 698
699 699 :``readsize``: total size of data read from the disk for a revision
700 700 (sum of the sizes of all the blocks)
701 701 :``largestblock``: size of the largest block of data read from the disk
702 702 :``readdensity``: density of useful bytes in the data read from the disk
703 703 :``srchunks``: the number of data hunks in which the whole revision is read
704 704
705 705 Sparse reads can be enabled with experimental.sparse-read = True
706 706 """
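# e.g. (illustrative): "hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'"
# reports the chain id and chain length for every manifest revision.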
707 707 opts = pycompat.byteskwargs(opts)
708 708 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
709 709 index = r.index
710 710 start = r.start
711 711 length = r.length
712 712 generaldelta = r.version & revlog.FLAG_GENERALDELTA
713 713 withsparseread = getattr(r, '_withsparseread', False)
714 714
715 715 def revinfo(rev):
716 716 e = index[rev]
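# revlog index entry layout (v1): e[1]=compressed length, e[2]=uncompressed
# length, e[3]=delta base rev, e[5] and e[6]=parent revs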
717 717 compsize = e[1]
718 718 uncompsize = e[2]
719 719 chainsize = 0
720 720
721 721 if generaldelta:
722 722 if e[3] == e[5]:
723 723 deltatype = b'p1'
724 724 elif e[3] == e[6]:
725 725 deltatype = b'p2'
726 726 elif e[3] == rev - 1:
727 727 deltatype = b'prev'
728 728 elif e[3] == rev:
729 729 deltatype = b'base'
730 730 else:
731 731 deltatype = b'other'
732 732 else:
733 733 if e[3] == rev:
734 734 deltatype = b'base'
735 735 else:
736 736 deltatype = b'prev'
737 737
738 738 chain = r._deltachain(rev)[0]
739 739 for iterrev in chain:
740 740 e = index[iterrev]
741 741 chainsize += e[1]
742 742
743 743 return compsize, uncompsize, deltatype, chain, chainsize
744 744
745 745 fm = ui.formatter(b'debugdeltachain', opts)
746 746
747 747 fm.plain(
748 748 b' rev chain# chainlen prev delta '
749 749 b'size rawsize chainsize ratio lindist extradist '
750 750 b'extraratio'
751 751 )
752 752 if withsparseread:
753 753 fm.plain(b' readsize largestblk rddensity srchunks')
754 754 fm.plain(b'\n')
755 755
756 756 chainbases = {}
757 757 for rev in r:
758 758 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
759 759 chainbase = chain[0]
760 760 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
761 761 basestart = start(chainbase)
762 762 revstart = start(rev)
763 763 lineardist = revstart + comp - basestart
764 764 extradist = lineardist - chainsize
765 765 try:
766 766 prevrev = chain[-2]
767 767 except IndexError:
768 768 prevrev = -1
769 769
770 770 if uncomp != 0:
771 771 chainratio = float(chainsize) / float(uncomp)
772 772 else:
773 773 chainratio = chainsize
774 774
775 775 if chainsize != 0:
776 776 extraratio = float(extradist) / float(chainsize)
777 777 else:
778 778 extraratio = extradist
779 779
780 780 fm.startitem()
781 781 fm.write(
782 782 b'rev chainid chainlen prevrev deltatype compsize '
783 783 b'uncompsize chainsize chainratio lindist extradist '
784 784 b'extraratio',
785 785 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
786 786 rev,
787 787 chainid,
788 788 len(chain),
789 789 prevrev,
790 790 deltatype,
791 791 comp,
792 792 uncomp,
793 793 chainsize,
794 794 chainratio,
795 795 lineardist,
796 796 extradist,
797 797 extraratio,
798 798 rev=rev,
799 799 chainid=chainid,
800 800 chainlen=len(chain),
801 801 prevrev=prevrev,
802 802 deltatype=deltatype,
803 803 compsize=comp,
804 804 uncompsize=uncomp,
805 805 chainsize=chainsize,
806 806 chainratio=chainratio,
807 807 lindist=lineardist,
808 808 extradist=extradist,
809 809 extraratio=extraratio,
810 810 )
811 811 if withsparseread:
812 812 readsize = 0
813 813 largestblock = 0
814 814 srchunks = 0
815 815
816 816 for revschunk in deltautil.slicechunk(r, chain):
817 817 srchunks += 1
818 818 blkend = start(revschunk[-1]) + length(revschunk[-1])
819 819 blksize = blkend - start(revschunk[0])
820 820
821 821 readsize += blksize
822 822 if largestblock < blksize:
823 823 largestblock = blksize
824 824
825 825 if readsize:
826 826 readdensity = float(chainsize) / float(readsize)
827 827 else:
828 828 readdensity = 1
829 829
830 830 fm.write(
831 831 b'readsize largestblock readdensity srchunks',
832 832 b' %10d %10d %9.5f %8d',
833 833 readsize,
834 834 largestblock,
835 835 readdensity,
836 836 srchunks,
837 837 readsize=readsize,
838 838 largestblock=largestblock,
839 839 readdensity=readdensity,
840 840 srchunks=srchunks,
841 841 )
842 842
843 843 fm.plain(b'\n')
844 844
845 845 fm.end()
846 846
847 847
848 848 @command(
849 849 b'debugdirstate|debugstate',
850 850 [
851 851 (
852 852 b'',
853 853 b'nodates',
854 854 None,
855 855 _(b'do not display the saved mtime (DEPRECATED)'),
856 856 ),
857 857 (b'', b'dates', True, _(b'display the saved mtime')),
858 858 (b'', b'datesort', None, _(b'sort by saved mtime')),
859 859 ],
860 860 _(b'[OPTION]...'),
861 861 )
862 862 def debugstate(ui, repo, **opts):
863 863 """show the contents of the current dirstate"""
864 864
865 865 nodates = not opts['dates']
866 866 if opts.get('nodates') is not None:
867 867 nodates = True
868 868 datesort = opts.get('datesort')
869 869
870 870 if datesort:
871 871 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
872 872 else:
873 873 keyfunc = None # sort by filename
874 874 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
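# each dirstate entry is a (state, mode, size, mtime) tuple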
875 875 if ent[3] == -1:
876 876 timestr = b'unset '
877 877 elif nodates:
878 878 timestr = b'set '
879 879 else:
880 880 timestr = time.strftime(
881 881 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
882 882 )
883 883 timestr = encoding.strtolocal(timestr)
884 884 if ent[1] & 0o20000:
885 885 mode = b'lnk'
886 886 else:
887 887 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
888 888 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
889 889 for f in repo.dirstate.copies():
890 890 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
891 891
892 892
893 893 @command(
894 894 b'debugdiscovery',
895 895 [
896 896 (b'', b'old', None, _(b'use old-style discovery')),
897 897 (
898 898 b'',
899 899 b'nonheads',
900 900 None,
901 901 _(b'use old-style discovery with non-heads included'),
902 902 ),
903 903 (b'', b'rev', [], _(b'restrict discovery to this set of revs')),
904 904 (b'', b'seed', b'12323', _(b'specify the random seed used for discovery')),
905 905 ]
906 906 + cmdutil.remoteopts,
907 907 _(b'[--rev REV] [OTHER]'),
908 908 )
909 909 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
910 910 """runs the changeset discovery protocol in isolation"""
911 911 opts = pycompat.byteskwargs(opts)
912 912 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
913 913 remote = hg.peer(repo, opts, remoteurl)
914 914 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
915 915
916 916 # make sure tests are repeatable
917 917 random.seed(int(opts[b'seed']))
918 918
919 919 if opts.get(b'old'):
920 920
921 921 def doit(pushedrevs, remoteheads, remote=remote):
922 922 if not util.safehasattr(remote, b'branches'):
923 923 # enable in-client legacy support
924 924 remote = localrepo.locallegacypeer(remote.local())
925 925 common, _in, hds = treediscovery.findcommonincoming(
926 926 repo, remote, force=True
927 927 )
928 928 common = set(common)
929 929 if not opts.get(b'nonheads'):
930 930 ui.writenoi18n(
931 931 b"unpruned common: %s\n"
932 932 % b" ".join(sorted(short(n) for n in common))
933 933 )
934 934
935 935 clnode = repo.changelog.node
936 936 common = repo.revs(b'heads(::%ln)', common)
937 937 common = {clnode(r) for r in common}
938 938 return common, hds
939 939
940 940 else:
941 941
942 942 def doit(pushedrevs, remoteheads, remote=remote):
943 943 nodes = None
944 944 if pushedrevs:
945 945 revs = scmutil.revrange(repo, pushedrevs)
946 946 nodes = [repo[r].node() for r in revs]
947 947 common, any, hds = setdiscovery.findcommonheads(
948 948 ui, repo, remote, ancestorsof=nodes
949 949 )
950 950 return common, hds
951 951
952 952 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
953 953 localrevs = opts[b'rev']
954 954 with util.timedcm('debug-discovery') as t:
955 955 common, hds = doit(localrevs, remoterevs)
956 956
957 957 # compute all statistics
958 958 common = set(common)
959 959 rheads = set(hds)
960 960 lheads = set(repo.heads())
961 961
962 962 data = {}
963 963 data[b'elapsed'] = t.elapsed
964 964 data[b'nb-common'] = len(common)
965 965 data[b'nb-common-local'] = len(common & lheads)
966 966 data[b'nb-common-remote'] = len(common & rheads)
967 967 data[b'nb-common-both'] = len(common & rheads & lheads)
968 968 data[b'nb-local'] = len(lheads)
969 969 data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
970 970 data[b'nb-remote'] = len(rheads)
971 971 data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
972 972 data[b'nb-revs'] = len(repo.revs(b'all()'))
973 973 data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
974 974 data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']
975 975
976 976 # display discovery summary
977 977 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
978 978 ui.writenoi18n(b"heads summary:\n")
979 979 ui.writenoi18n(b" total common heads: %(nb-common)9d\n" % data)
980 980 ui.writenoi18n(b" also local heads: %(nb-common-local)9d\n" % data)
981 981 ui.writenoi18n(b" also remote heads: %(nb-common-remote)9d\n" % data)
982 982 ui.writenoi18n(b" both: %(nb-common-both)9d\n" % data)
983 983 ui.writenoi18n(b" local heads: %(nb-local)9d\n" % data)
984 984 ui.writenoi18n(b" common: %(nb-common-local)9d\n" % data)
985 985 ui.writenoi18n(b" missing: %(nb-local-missing)9d\n" % data)
986 986 ui.writenoi18n(b" remote heads: %(nb-remote)9d\n" % data)
987 987 ui.writenoi18n(b" common: %(nb-common-remote)9d\n" % data)
988 988 ui.writenoi18n(b" unknown: %(nb-remote-unknown)9d\n" % data)
989 989 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
990 990 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
991 991 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
992 992
993 993 if ui.verbose:
994 994 ui.writenoi18n(
995 995 b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
996 996 )
997 997
998 998
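# downloads are copied in fixed 4 KiB chunks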
999 999 _chunksize = 4 << 10
1000 1000
1001 1001
1002 1002 @command(
1003 1003 b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
1004 1004 )
1005 1005 def debugdownload(ui, repo, url, output=None, **opts):
1006 1006 """download a resource using Mercurial logic and config
1007 1007 """
1008 1008 fh = urlmod.open(ui, url, output)
1009 1009
1010 1010 dest = ui
1011 1011 if output:
1012 1012 dest = open(output, b"wb", _chunksize)
1013 1013 try:
1014 1014 data = fh.read(_chunksize)
1015 1015 while data:
1016 1016 dest.write(data)
1017 1017 data = fh.read(_chunksize)
1018 1018 finally:
1019 1019 if output:
1020 1020 dest.close()
1021 1021
1022 1022
1023 1023 @command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
1024 1024 def debugextensions(ui, repo, **opts):
1025 1025 '''show information about active extensions'''
1026 1026 opts = pycompat.byteskwargs(opts)
1027 1027 exts = extensions.extensions(ui)
1028 1028 hgver = util.version()
1029 1029 fm = ui.formatter(b'debugextensions', opts)
1030 1030 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
1031 1031 isinternal = extensions.ismoduleinternal(extmod)
1032 1032 extsource = None
1033 1033
1034 1034 if util.safehasattr(extmod, '__file__'):
1035 1035 extsource = pycompat.fsencode(extmod.__file__)
1036 1036 elif getattr(sys, 'oxidized', False):
1037 1037 extsource = pycompat.sysexecutable
1038 1038 if isinternal:
1039 1039 exttestedwith = [] # never expose magic string to users
1040 1040 else:
1041 1041 exttestedwith = getattr(extmod, 'testedwith', b'').split()
1042 1042 extbuglink = getattr(extmod, 'buglink', None)
1043 1043
1044 1044 fm.startitem()
1045 1045
1046 1046 if ui.quiet or ui.verbose:
1047 1047 fm.write(b'name', b'%s\n', extname)
1048 1048 else:
1049 1049 fm.write(b'name', b'%s', extname)
1050 1050 if isinternal or hgver in exttestedwith:
1051 1051 fm.plain(b'\n')
1052 1052 elif not exttestedwith:
1053 1053 fm.plain(_(b' (untested!)\n'))
1054 1054 else:
1055 1055 lasttestedversion = exttestedwith[-1]
1056 1056 fm.plain(b' (%s!)\n' % lasttestedversion)
1057 1057
1058 1058 fm.condwrite(
1059 1059 ui.verbose and extsource,
1060 1060 b'source',
1061 1061 _(b' location: %s\n'),
1062 1062 extsource or b"",
1063 1063 )
1064 1064
1065 1065 if ui.verbose:
1066 1066 fm.plain(_(b' bundled: %s\n') % [b'no', b'yes'][isinternal])
1067 1067 fm.data(bundled=isinternal)
1068 1068
1069 1069 fm.condwrite(
1070 1070 ui.verbose and exttestedwith,
1071 1071 b'testedwith',
1072 1072 _(b' tested with: %s\n'),
1073 1073 fm.formatlist(exttestedwith, name=b'ver'),
1074 1074 )
1075 1075
1076 1076 fm.condwrite(
1077 1077 ui.verbose and extbuglink,
1078 1078 b'buglink',
1079 1079 _(b' bug reporting: %s\n'),
1080 1080 extbuglink or b"",
1081 1081 )
1082 1082
1083 1083 fm.end()
1084 1084
1085 1085
1086 1086 @command(
1087 1087 b'debugfileset',
1088 1088 [
1089 1089 (
1090 1090 b'r',
1091 1091 b'rev',
1092 1092 b'',
1093 1093 _(b'apply the filespec on this revision'),
1094 1094 _(b'REV'),
1095 1095 ),
1096 1096 (
1097 1097 b'',
1098 1098 b'all-files',
1099 1099 False,
1100 1100 _(b'test files from all revisions and working directory'),
1101 1101 ),
1102 1102 (
1103 1103 b's',
1104 1104 b'show-matcher',
1105 1105 None,
1106 1106 _(b'print internal representation of matcher'),
1107 1107 ),
1108 1108 (
1109 1109 b'p',
1110 1110 b'show-stage',
1111 1111 [],
1112 1112 _(b'print parsed tree at the given stage'),
1113 1113 _(b'NAME'),
1114 1114 ),
1115 1115 ],
1116 1116 _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
1117 1117 )
1118 1118 def debugfileset(ui, repo, expr, **opts):
1119 1119 '''parse and apply a fileset specification'''
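# e.g. (illustrative): hg debugfileset -p all -s 'binary() and size(">1k")'
# prints the parsed, analyzed and optimized trees plus the compiled matcher
# before listing the matching files.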
1120 1120 from . import fileset
1121 1121
1122 1122 fileset.symbols # force import of fileset so we have predicates to optimize
1123 1123 opts = pycompat.byteskwargs(opts)
1124 1124 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
1125 1125
1126 1126 stages = [
1127 1127 (b'parsed', pycompat.identity),
1128 1128 (b'analyzed', filesetlang.analyze),
1129 1129 (b'optimized', filesetlang.optimize),
1130 1130 ]
1131 1131 stagenames = set(n for n, f in stages)
1132 1132
1133 1133 showalways = set()
1134 1134 if ui.verbose and not opts[b'show_stage']:
1135 1135 # show parsed tree by --verbose (deprecated)
1136 1136 showalways.add(b'parsed')
1137 1137 if opts[b'show_stage'] == [b'all']:
1138 1138 showalways.update(stagenames)
1139 1139 else:
1140 1140 for n in opts[b'show_stage']:
1141 1141 if n not in stagenames:
1142 1142 raise error.Abort(_(b'invalid stage name: %s') % n)
1143 1143 showalways.update(opts[b'show_stage'])
1144 1144
1145 1145 tree = filesetlang.parse(expr)
1146 1146 for n, f in stages:
1147 1147 tree = f(tree)
1148 1148 if n in showalways:
1149 1149 if opts[b'show_stage'] or n != b'parsed':
1150 1150 ui.write(b"* %s:\n" % n)
1151 1151 ui.write(filesetlang.prettyformat(tree), b"\n")
1152 1152
1153 1153 files = set()
1154 1154 if opts[b'all_files']:
1155 1155 for r in repo:
1156 1156 c = repo[r]
1157 1157 files.update(c.files())
1158 1158 files.update(c.substate)
1159 1159 if opts[b'all_files'] or ctx.rev() is None:
1160 1160 wctx = repo[None]
1161 1161 files.update(
1162 1162 repo.dirstate.walk(
1163 1163 scmutil.matchall(repo),
1164 1164 subrepos=list(wctx.substate),
1165 1165 unknown=True,
1166 1166 ignored=True,
1167 1167 )
1168 1168 )
1169 1169 files.update(wctx.substate)
1170 1170 else:
1171 1171 files.update(ctx.files())
1172 1172 files.update(ctx.substate)
1173 1173
1174 m = ctx.matchfileset(expr)
1174 m = ctx.matchfileset(repo.getcwd(), expr)
1175 1175 if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
1176 1176 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
1177 1177 for f in sorted(files):
1178 1178 if not m(f):
1179 1179 continue
1180 1180 ui.write(b"%s\n" % f)
1181 1181
1182 1182
1183 1183 @command(b'debugformat', [] + cmdutil.formatteropts)
1184 1184 def debugformat(ui, repo, **opts):
1185 1185 """display format information about the current repository
1186 1186
1187 1187 Use --verbose to get extra information about current config value and
1188 1188 Mercurial default."""
1189 1189 opts = pycompat.byteskwargs(opts)
1190 1190 maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
1191 1191 maxvariantlength = max(len(b'format-variant'), maxvariantlength)
1192 1192
1193 1193 def makeformatname(name):
1194 1194 return b'%s:' + (b' ' * (maxvariantlength - len(name)))
1195 1195
1196 1196 fm = ui.formatter(b'debugformat', opts)
1197 1197 if fm.isplain():
1198 1198
1199 1199 def formatvalue(value):
1200 1200 if util.safehasattr(value, b'startswith'):
1201 1201 return value
1202 1202 if value:
1203 1203 return b'yes'
1204 1204 else:
1205 1205 return b'no'
1206 1206
1207 1207 else:
1208 1208 formatvalue = pycompat.identity
1209 1209
1210 1210 fm.plain(b'format-variant')
1211 1211 fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
1212 1212 fm.plain(b' repo')
1213 1213 if ui.verbose:
1214 1214 fm.plain(b' config default')
1215 1215 fm.plain(b'\n')
1216 1216 for fv in upgrade.allformatvariant:
1217 1217 fm.startitem()
1218 1218 repovalue = fv.fromrepo(repo)
1219 1219 configvalue = fv.fromconfig(repo)
1220 1220
1221 1221 if repovalue != configvalue:
1222 1222 namelabel = b'formatvariant.name.mismatchconfig'
1223 1223 repolabel = b'formatvariant.repo.mismatchconfig'
1224 1224 elif repovalue != fv.default:
1225 1225 namelabel = b'formatvariant.name.mismatchdefault'
1226 1226 repolabel = b'formatvariant.repo.mismatchdefault'
1227 1227 else:
1228 1228 namelabel = b'formatvariant.name.uptodate'
1229 1229 repolabel = b'formatvariant.repo.uptodate'
1230 1230
1231 1231 fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
1232 1232 fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
1233 1233 if fv.default != configvalue:
1234 1234 configlabel = b'formatvariant.config.special'
1235 1235 else:
1236 1236 configlabel = b'formatvariant.config.default'
1237 1237 fm.condwrite(
1238 1238 ui.verbose,
1239 1239 b'config',
1240 1240 b' %6s',
1241 1241 formatvalue(configvalue),
1242 1242 label=configlabel,
1243 1243 )
1244 1244 fm.condwrite(
1245 1245 ui.verbose,
1246 1246 b'default',
1247 1247 b' %7s',
1248 1248 formatvalue(fv.default),
1249 1249 label=b'formatvariant.default',
1250 1250 )
1251 1251 fm.plain(b'\n')
1252 1252 fm.end()
1253 1253
1254 1254
1255 1255 @command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
1256 1256 def debugfsinfo(ui, path=b"."):
1257 1257 """show information detected about current filesystem"""
1258 1258 ui.writenoi18n(b'path: %s\n' % path)
1259 1259 ui.writenoi18n(
1260 1260 b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
1261 1261 )
1262 1262 ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
1263 1263 ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
1264 1264 ui.writenoi18n(
1265 1265 b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
1266 1266 )
1267 1267 ui.writenoi18n(
1268 1268 b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
1269 1269 )
1270 1270 casesensitive = b'(unknown)'
1271 1271 try:
1272 1272 with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
1273 1273 casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
1274 1274 except OSError:
1275 1275 pass
1276 1276 ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)
1277 1277
1278 1278
1279 1279 @command(
1280 1280 b'debuggetbundle',
1281 1281 [
1282 1282 (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
1283 1283 (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
1284 1284 (
1285 1285 b't',
1286 1286 b'type',
1287 1287 b'bzip2',
1288 1288 _(b'bundle compression type to use'),
1289 1289 _(b'TYPE'),
1290 1290 ),
1291 1291 ],
1292 1292 _(b'REPO FILE [-H|-C ID]...'),
1293 1293 norepo=True,
1294 1294 )
1295 1295 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
1296 1296 """retrieves a bundle from a repo
1297 1297
1298 1298 Every ID must be a full-length hex node id string. Saves the bundle to the
1299 1299 given file.
1300 1300 """
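# e.g. (illustrative, with a placeholder node id):
#   hg debuggetbundle http://example.com/hg out.hg -H <40-hex-head-id> -t bzip2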
1301 1301 opts = pycompat.byteskwargs(opts)
1302 1302 repo = hg.peer(ui, opts, repopath)
1303 1303 if not repo.capable(b'getbundle'):
1304 1304 raise error.Abort(b"getbundle() not supported by target repository")
1305 1305 args = {}
1306 1306 if common:
1307 1307 args['common'] = [bin(s) for s in common]
1308 1308 if head:
1309 1309 args['heads'] = [bin(s) for s in head]
1310 1310 # TODO: get desired bundlecaps from command line.
1311 1311 args['bundlecaps'] = None
1312 1312 bundle = repo.getbundle(b'debug', **args)
1313 1313
1314 1314 bundletype = opts.get(b'type', b'bzip2').lower()
1315 1315 btypes = {
1316 1316 b'none': b'HG10UN',
1317 1317 b'bzip2': b'HG10BZ',
1318 1318 b'gzip': b'HG10GZ',
1319 1319 b'bundle2': b'HG20',
1320 1320 }
1321 1321 bundletype = btypes.get(bundletype)
1322 1322 if bundletype not in bundle2.bundletypes:
1323 1323 raise error.Abort(_(b'unknown bundle type specified with --type'))
1324 1324 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1325 1325
1326 1326
1327 1327 @command(b'debugignore', [], b'[FILE]')
1328 1328 def debugignore(ui, repo, *files, **opts):
1329 1329 """display the combined ignore pattern and information about ignored files
1330 1330
1331 1331 With no argument display the combined ignore pattern.
1332 1332
1333 1333 Given space-separated file names, this shows whether each file is
1334 1334 ignored and, if so, the ignore rule (file and line number) that matched it.
1335 1335 """
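# e.g. (illustrative): "hg debugignore foo.pyc" reports whether foo.pyc is
# ignored and, if so, the .hgignore rule (file and line) that matched it.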
1336 1336 ignore = repo.dirstate._ignore
1337 1337 if not files:
1338 1338 # Show all the patterns
1339 1339 ui.write(b"%s\n" % pycompat.byterepr(ignore))
1340 1340 else:
1341 1341 m = scmutil.match(repo[None], pats=files)
1342 1342 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1343 1343 for f in m.files():
1344 1344 nf = util.normpath(f)
1345 1345 ignored = None
1346 1346 ignoredata = None
1347 1347 if nf != b'.':
1348 1348 if ignore(nf):
1349 1349 ignored = nf
1350 1350 ignoredata = repo.dirstate._ignorefileandline(nf)
1351 1351 else:
1352 1352 for p in pathutil.finddirs(nf):
1353 1353 if ignore(p):
1354 1354 ignored = p
1355 1355 ignoredata = repo.dirstate._ignorefileandline(p)
1356 1356 break
1357 1357 if ignored:
1358 1358 if ignored == nf:
1359 1359 ui.write(_(b"%s is ignored\n") % uipathfn(f))
1360 1360 else:
1361 1361 ui.write(
1362 1362 _(
1363 1363 b"%s is ignored because of "
1364 1364 b"containing directory %s\n"
1365 1365 )
1366 1366 % (uipathfn(f), ignored)
1367 1367 )
1368 1368 ignorefile, lineno, line = ignoredata
1369 1369 ui.write(
1370 1370 _(b"(ignore rule in %s, line %d: '%s')\n")
1371 1371 % (ignorefile, lineno, line)
1372 1372 )
1373 1373 else:
1374 1374 ui.write(_(b"%s is not ignored\n") % uipathfn(f))
1375 1375
1376 1376
1377 1377 @command(
1378 1378 b'debugindex',
1379 1379 cmdutil.debugrevlogopts + cmdutil.formatteropts,
1380 1380 _(b'-c|-m|FILE'),
1381 1381 )
1382 1382 def debugindex(ui, repo, file_=None, **opts):
1383 1383 """dump index data for a storage primitive"""
1384 1384 opts = pycompat.byteskwargs(opts)
1385 1385 store = cmdutil.openstorage(repo, b'debugindex', file_, opts)
1386 1386
1387 1387 if ui.debugflag:
1388 1388 shortfn = hex
1389 1389 else:
1390 1390 shortfn = short
1391 1391
1392 1392 idlen = 12
1393 1393 for i in store:
1394 1394 idlen = len(shortfn(store.node(i)))
1395 1395 break
1396 1396
1397 1397 fm = ui.formatter(b'debugindex', opts)
1398 1398 fm.plain(
1399 1399 b' rev linkrev %s %s p2\n'
1400 1400 % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
1401 1401 )
1402 1402
1403 1403 for rev in store:
1404 1404 node = store.node(rev)
1405 1405 parents = store.parents(node)
1406 1406
1407 1407 fm.startitem()
1408 1408 fm.write(b'rev', b'%6d ', rev)
1409 1409 fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
1410 1410 fm.write(b'node', b'%s ', shortfn(node))
1411 1411 fm.write(b'p1', b'%s ', shortfn(parents[0]))
1412 1412 fm.write(b'p2', b'%s', shortfn(parents[1]))
1413 1413 fm.plain(b'\n')
1414 1414
1415 1415 fm.end()
1416 1416
1417 1417
1418 1418 @command(
1419 1419 b'debugindexdot',
1420 1420 cmdutil.debugrevlogopts,
1421 1421 _(b'-c|-m|FILE'),
1422 1422 optionalrepo=True,
1423 1423 )
1424 1424 def debugindexdot(ui, repo, file_=None, **opts):
1425 1425 """dump an index DAG as a graphviz dot file"""
1426 1426 opts = pycompat.byteskwargs(opts)
1427 1427 r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
1428 1428 ui.writenoi18n(b"digraph G {\n")
1429 1429 for i in r:
1430 1430 node = r.node(i)
1431 1431 pp = r.parents(node)
1432 1432 ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
1433 1433 if pp[1] != nullid:
1434 1434 ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
1435 1435 ui.write(b"}\n")
1436 1436
1437 1437
1438 1438 @command(b'debugindexstats', [])
1439 1439 def debugindexstats(ui, repo):
1440 1440 """show stats related to the changelog index"""
1441 1441 repo.changelog.shortest(nullid, 1)
1442 1442 index = repo.changelog.index
1443 1443 if not util.safehasattr(index, b'stats'):
1444 1444 raise error.Abort(_(b'debugindexstats only works with native code'))
1445 1445 for k, v in sorted(index.stats().items()):
1446 1446 ui.write(b'%s: %d\n' % (k, v))
1447 1447
1448 1448
1449 1449 @command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
1450 1450 def debuginstall(ui, **opts):
1451 1451 '''test Mercurial installation
1452 1452
1453 1453 Returns 0 on success.
1454 1454 '''
1455 1455 opts = pycompat.byteskwargs(opts)
1456 1456
1457 1457 problems = 0
1458 1458
1459 1459 fm = ui.formatter(b'debuginstall', opts)
1460 1460 fm.startitem()
1461 1461
1462 1462 # encoding
1463 1463 fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
1464 1464 err = None
1465 1465 try:
1466 1466 codecs.lookup(pycompat.sysstr(encoding.encoding))
1467 1467 except LookupError as inst:
1468 1468 err = stringutil.forcebytestr(inst)
1469 1469 problems += 1
1470 1470 fm.condwrite(
1471 1471 err,
1472 1472 b'encodingerror',
1473 1473 _(b" %s\n (check that your locale is properly set)\n"),
1474 1474 err,
1475 1475 )
1476 1476
1477 1477 # Python
1478 1478 pythonlib = None
1479 1479 if util.safehasattr(os, '__file__'):
1480 1480 pythonlib = os.path.dirname(pycompat.fsencode(os.__file__))
1481 1481 elif getattr(sys, 'oxidized', False):
1482 1482 pythonlib = pycompat.sysexecutable
1483 1483
1484 1484 fm.write(
1485 1485 b'pythonexe',
1486 1486 _(b"checking Python executable (%s)\n"),
1487 1487 pycompat.sysexecutable or _(b"unknown"),
1488 1488 )
1489 1489 fm.write(
1490 1490 b'pythonver',
1491 1491 _(b"checking Python version (%s)\n"),
1492 1492 (b"%d.%d.%d" % sys.version_info[:3]),
1493 1493 )
1494 1494 fm.write(
1495 1495 b'pythonlib',
1496 1496 _(b"checking Python lib (%s)...\n"),
1497 1497 pythonlib or _(b"unknown"),
1498 1498 )
1499 1499
1500 1500 security = set(sslutil.supportedprotocols)
1501 1501 if sslutil.hassni:
1502 1502 security.add(b'sni')
1503 1503
1504 1504 fm.write(
1505 1505 b'pythonsecurity',
1506 1506 _(b"checking Python security support (%s)\n"),
1507 1507 fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
1508 1508 )
1509 1509
1510 1510 # These are warnings, not errors. So don't increment problem count. This
1511 1511 # may change in the future.
1512 1512 if b'tls1.2' not in security:
1513 1513 fm.plain(
1514 1514 _(
1515 1515 b' TLS 1.2 not supported by Python install; '
1516 1516 b'network connections lack modern security\n'
1517 1517 )
1518 1518 )
1519 1519 if b'sni' not in security:
1520 1520 fm.plain(
1521 1521 _(
1522 1522 b' SNI not supported by Python install; may have '
1523 1523 b'connectivity issues with some servers\n'
1524 1524 )
1525 1525 )
1526 1526
1527 1527 # TODO print CA cert info
1528 1528
1529 1529 # hg version
1530 1530 hgver = util.version()
1531 1531 fm.write(
1532 1532 b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
1533 1533 )
1534 1534 fm.write(
1535 1535 b'hgverextra',
1536 1536 _(b"checking Mercurial custom build (%s)\n"),
1537 1537 b'+'.join(hgver.split(b'+')[1:]),
1538 1538 )
1539 1539
1540 1540 # compiled modules
1541 1541 hgmodules = None
1542 1542 if util.safehasattr(sys.modules[__name__], '__file__'):
1543 1543 hgmodules = os.path.dirname(pycompat.fsencode(__file__))
1544 1544 elif getattr(sys, 'oxidized', False):
1545 1545 hgmodules = pycompat.sysexecutable
1546 1546
1547 1547 fm.write(
1548 1548 b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
1549 1549 )
1550 1550 fm.write(
1551 1551 b'hgmodules',
1552 1552 _(b"checking installed modules (%s)...\n"),
1553 1553 hgmodules or _(b"unknown"),
1554 1554 )
1555 1555
1556 1556 rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
1557 1557 rustext = rustandc # for now, that's the only case
1558 1558 cext = policy.policy in (b'c', b'allow') or rustandc
1559 1559 nopure = cext or rustext
1560 1560 if nopure:
1561 1561 err = None
1562 1562 try:
1563 1563 if cext:
1564 1564 from .cext import ( # pytype: disable=import-error
1565 1565 base85,
1566 1566 bdiff,
1567 1567 mpatch,
1568 1568 osutil,
1569 1569 )
1570 1570
1571 1571 # quiet pyflakes
1572 1572 dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
1573 1573 if rustext:
1574 1574 from .rustext import ( # pytype: disable=import-error
1575 1575 ancestor,
1576 1576 dirstate,
1577 1577 )
1578 1578
1579 1579 dir(ancestor), dir(dirstate) # quiet pyflakes
1580 1580 except Exception as inst:
1581 1581 err = stringutil.forcebytestr(inst)
1582 1582 problems += 1
1583 1583 fm.condwrite(err, b'extensionserror', b" %s\n", err)
1584 1584
1585 1585 compengines = util.compengines._engines.values()
1586 1586 fm.write(
1587 1587 b'compengines',
1588 1588 _(b'checking registered compression engines (%s)\n'),
1589 1589 fm.formatlist(
1590 1590 sorted(e.name() for e in compengines),
1591 1591 name=b'compengine',
1592 1592 fmt=b'%s',
1593 1593 sep=b', ',
1594 1594 ),
1595 1595 )
1596 1596 fm.write(
1597 1597 b'compenginesavail',
1598 1598 _(b'checking available compression engines (%s)\n'),
1599 1599 fm.formatlist(
1600 1600 sorted(e.name() for e in compengines if e.available()),
1601 1601 name=b'compengine',
1602 1602 fmt=b'%s',
1603 1603 sep=b', ',
1604 1604 ),
1605 1605 )
1606 1606 wirecompengines = compression.compengines.supportedwireengines(
1607 1607 compression.SERVERROLE
1608 1608 )
1609 1609 fm.write(
1610 1610 b'compenginesserver',
1611 1611 _(
1612 1612 b'checking available compression engines '
1613 1613 b'for wire protocol (%s)\n'
1614 1614 ),
1615 1615 fm.formatlist(
1616 1616 [e.name() for e in wirecompengines if e.wireprotosupport()],
1617 1617 name=b'compengine',
1618 1618 fmt=b'%s',
1619 1619 sep=b', ',
1620 1620 ),
1621 1621 )
1622 1622 re2 = b'missing'
1623 1623 if util._re2:
1624 1624 re2 = b'available'
1625 1625 fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
1626 1626 fm.data(re2=bool(util._re2))
1627 1627
1628 1628 # templates
1629 1629 p = templater.templatepaths()
1630 1630 fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
1631 1631 fm.condwrite(not p, b'', _(b" no template directories found\n"))
1632 1632 if p:
1633 1633 m = templater.templatepath(b"map-cmdline.default")
1634 1634 if m:
1635 1635 # template found, check if it is working
1636 1636 err = None
1637 1637 try:
1638 1638 templater.templater.frommapfile(m)
1639 1639 except Exception as inst:
1640 1640 err = stringutil.forcebytestr(inst)
1641 1641 p = None
1642 1642 fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
1643 1643 else:
1644 1644 p = None
1645 1645 fm.condwrite(
1646 1646 p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
1647 1647 )
1648 1648 fm.condwrite(
1649 1649 not m,
1650 1650 b'defaulttemplatenotfound',
1651 1651 _(b" template '%s' not found\n"),
1652 1652 b"default",
1653 1653 )
1654 1654 if not p:
1655 1655 problems += 1
1656 1656 fm.condwrite(
1657 1657 not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
1658 1658 )
1659 1659
1660 1660 # editor
1661 1661 editor = ui.geteditor()
1662 1662 editor = util.expandpath(editor)
1663 1663 editorbin = procutil.shellsplit(editor)[0]
1664 1664 fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
1665 1665 cmdpath = procutil.findexe(editorbin)
1666 1666 fm.condwrite(
1667 1667 not cmdpath and editor == b'vi',
1668 1668 b'vinotfound',
1669 1669 _(
1670 1670 b" No commit editor set and can't find %s in PATH\n"
1671 1671 b" (specify a commit editor in your configuration"
1672 1672 b" file)\n"
1673 1673 ),
1674 1674 not cmdpath and editor == b'vi' and editorbin,
1675 1675 )
1676 1676 fm.condwrite(
1677 1677 not cmdpath and editor != b'vi',
1678 1678 b'editornotfound',
1679 1679 _(
1680 1680 b" Can't find editor '%s' in PATH\n"
1681 1681 b" (specify a commit editor in your configuration"
1682 1682 b" file)\n"
1683 1683 ),
1684 1684 not cmdpath and editorbin,
1685 1685 )
1686 1686 if not cmdpath and editor != b'vi':
1687 1687 problems += 1
1688 1688
1689 1689 # check username
1690 1690 username = None
1691 1691 err = None
1692 1692 try:
1693 1693 username = ui.username()
1694 1694 except error.Abort as e:
1695 1695 err = stringutil.forcebytestr(e)
1696 1696 problems += 1
1697 1697
1698 1698 fm.condwrite(
1699 1699 username, b'username', _(b"checking username (%s)\n"), username
1700 1700 )
1701 1701 fm.condwrite(
1702 1702 err,
1703 1703 b'usernameerror',
1704 1704 _(
1705 1705 b"checking username...\n %s\n"
1706 1706 b" (specify a username in your configuration file)\n"
1707 1707 ),
1708 1708 err,
1709 1709 )
1710 1710
1711 1711 for name, mod in extensions.extensions():
1712 1712 handler = getattr(mod, 'debuginstall', None)
1713 1713 if handler is not None:
1714 1714 problems += handler(ui, fm)
1715 1715
1716 1716 fm.condwrite(not problems, b'', _(b"no problems detected\n"))
1717 1717 if not problems:
1718 1718 fm.data(problems=problems)
1719 1719 fm.condwrite(
1720 1720 problems,
1721 1721 b'problems',
1722 1722 _(b"%d problems detected, please check your install!\n"),
1723 1723 problems,
1724 1724 )
1725 1725 fm.end()
1726 1726
1727 1727 return problems
1728 1728
1729 1729
1730 1730 @command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
1731 1731 def debugknown(ui, repopath, *ids, **opts):
1732 1732 """test whether node ids are known to a repo
1733 1733
1734 1734 Every ID must be a full-length hex node id string. Returns a list of 0s
1735 1735 and 1s indicating unknown/known.
1736 1736 """
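# e.g. (illustrative): "hg debugknown URL NODE1 NODE2" prints "10" when the
# peer knows the first node but not the second.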
1737 1737 opts = pycompat.byteskwargs(opts)
1738 1738 repo = hg.peer(ui, opts, repopath)
1739 1739 if not repo.capable(b'known'):
1740 1740 raise error.Abort(b"known() not supported by target repository")
1741 1741 flags = repo.known([bin(s) for s in ids])
1742 1742 ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))
1743 1743
1744 1744
1745 1745 @command(b'debuglabelcomplete', [], _(b'LABEL...'))
1746 1746 def debuglabelcomplete(ui, repo, *args):
1747 1747 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1748 1748 debugnamecomplete(ui, repo, *args)
1749 1749
1750 1750
1751 1751 @command(
1752 1752 b'debuglocks',
1753 1753 [
1754 1754 (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
1755 1755 (
1756 1756 b'W',
1757 1757 b'force-wlock',
1758 1758 None,
1759 1759 _(b'free the working state lock (DANGEROUS)'),
1760 1760 ),
1761 1761 (b's', b'set-lock', None, _(b'set the store lock until stopped')),
1762 1762 (
1763 1763 b'S',
1764 1764 b'set-wlock',
1765 1765 None,
1766 1766 _(b'set the working state lock until stopped'),
1767 1767 ),
1768 1768 ],
1769 1769 _(b'[OPTION]...'),
1770 1770 )
1771 1771 def debuglocks(ui, repo, **opts):
1772 1772 """show or modify state of locks
1773 1773
1774 1774 By default, this command will show which locks are held. This
1775 1775 includes the user and process holding the lock, the amount of time
1776 1776 the lock has been held, and the machine name where the process is
1777 1777 running if it's not local.
1778 1778
1779 1779 Locks protect the integrity of Mercurial's data, so should be
1780 1780 treated with care. System crashes or other interruptions may cause
1781 1781 locks to not be properly released, though Mercurial will usually
1782 1782 detect and remove such stale locks automatically.
1783 1783
1784 1784 However, detecting stale locks may not always be possible (for
1785 1785 instance, on a shared filesystem). Removing locks may also be
1786 1786 blocked by filesystem permissions.
1787 1787
1788 1788 Setting a lock will prevent other commands from changing the data.
1789 1789 The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
1790 1790 The set locks are removed when the command exits.
1791 1791
1792 1792 Returns 0 if no locks are held.
1793 1793
1794 1794 """
1795 1795
1796 1796 if opts.get('force_lock'):
1797 1797 repo.svfs.unlink(b'lock')
1798 1798 if opts.get('force_wlock'):
1799 1799 repo.vfs.unlink(b'wlock')
1800 1800 if opts.get('force_lock') or opts.get('force_wlock'):
1801 1801 return 0
1802 1802
1803 1803 locks = []
1804 1804 try:
1805 1805 if opts.get('set_wlock'):
1806 1806 try:
1807 1807 locks.append(repo.wlock(False))
1808 1808 except error.LockHeld:
1809 1809 raise error.Abort(_(b'wlock is already held'))
1810 1810 if opts.get('set_lock'):
1811 1811 try:
1812 1812 locks.append(repo.lock(False))
1813 1813 except error.LockHeld:
1814 1814 raise error.Abort(_(b'lock is already held'))
1815 1815 if len(locks):
1816 1816 ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
1817 1817 return 0
1818 1818 finally:
1819 1819 release(*locks)
1820 1820
1821 1821 now = time.time()
1822 1822 held = 0
1823 1823
1824 1824 def report(vfs, name, method):
1825 1825 # this causes stale locks to get reaped for more accurate reporting
1826 1826 try:
1827 1827 l = method(False)
1828 1828 except error.LockHeld:
1829 1829 l = None
1830 1830
1831 1831 if l:
1832 1832 l.release()
1833 1833 else:
1834 1834 try:
1835 1835 st = vfs.lstat(name)
1836 1836 age = now - st[stat.ST_MTIME]
1837 1837 user = util.username(st.st_uid)
1838 1838 locker = vfs.readlock(name)
1839 1839 if b":" in locker:
1840 1840 host, pid = locker.split(b':')
1841 1841 if host == socket.gethostname():
1842 1842 locker = b'user %s, process %s' % (user or b'None', pid)
1843 1843 else:
1844 1844 locker = b'user %s, process %s, host %s' % (
1845 1845 user or b'None',
1846 1846 pid,
1847 1847 host,
1848 1848 )
1849 1849 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
1850 1850 return 1
1851 1851 except OSError as e:
1852 1852 if e.errno != errno.ENOENT:
1853 1853 raise
1854 1854
1855 1855 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
1856 1856 return 0
1857 1857
1858 1858 held += report(repo.svfs, b"lock", repo.lock)
1859 1859 held += report(repo.vfs, b"wlock", repo.wlock)
1860 1860
1861 1861 return held
1862 1862
1863 1863
1864 1864 @command(
1865 1865 b'debugmanifestfulltextcache',
1866 1866 [
1867 1867 (b'', b'clear', False, _(b'clear the cache')),
1868 1868 (
1869 1869 b'a',
1870 1870 b'add',
1871 1871 [],
1872 1872 _(b'add the given manifest nodes to the cache'),
1873 1873 _(b'NODE'),
1874 1874 ),
1875 1875 ],
1876 1876 b'',
1877 1877 )
1878 1878 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
1879 1879 """show, clear or amend the contents of the manifest fulltext cache"""
1880 1880
1881 1881 def getcache():
1882 1882 r = repo.manifestlog.getstorage(b'')
1883 1883 try:
1884 1884 return r._fulltextcache
1885 1885 except AttributeError:
1886 1886 msg = _(
1887 1887 b"Current revlog implementation doesn't appear to have a "
1888 1888 b"manifest fulltext cache\n"
1889 1889 )
1890 1890 raise error.Abort(msg)
1891 1891
1892 1892 if opts.get('clear'):
1893 1893 with repo.wlock():
1894 1894 cache = getcache()
1895 1895 cache.clear(clear_persisted_data=True)
1896 1896 return
1897 1897
1898 1898 if add:
1899 1899 with repo.wlock():
1900 1900 m = repo.manifestlog
1901 1901 store = m.getstorage(b'')
1902 1902 for n in add:
1903 1903 try:
1904 1904 manifest = m[store.lookup(n)]
1905 1905 except error.LookupError as e:
1906 1906 raise error.Abort(e, hint=b"Check your manifest node id")
1907 1907 manifest.read()  # stores revision in cache too
1908 1908 return
1909 1909
1910 1910 cache = getcache()
1911 1911 if not len(cache):
1912 1912 ui.write(_(b'cache empty\n'))
1913 1913 else:
1914 1914 ui.write(
1915 1915 _(
1916 1916 b'cache contains %d manifest entries, in order of most to '
1917 1917 b'least recent:\n'
1918 1918 )
1919 1919 % (len(cache),)
1920 1920 )
1921 1921 totalsize = 0
1922 1922 for nodeid in cache:
1923 1923 # use cache.peek so we do not update the LRU order
1924 1924 data = cache.peek(nodeid)
1925 1925 size = len(data)
1926 1926 totalsize += size + 24 # 20 bytes nodeid, 4 bytes size
1927 1927 ui.write(
1928 1928 _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
1929 1929 )
1930 1930 ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
1931 1931 ui.write(
1932 1932 _(b'total cache data size %s, on-disk %s\n')
1933 1933 % (util.bytecount(totalsize), util.bytecount(ondisk))
1934 1934 )
1935 1935
1936 1936
1937 1937 @command(b'debugmergestate', [], b'')
1938 1938 def debugmergestate(ui, repo, *args):
1939 1939 """print merge state
1940 1940
1941 1941 Use --verbose to print out information about whether v1 or v2 merge state
1942 1942 was chosen."""
1943 1943
1944 1944 def _hashornull(h):
1945 1945 if h == nullhex:
1946 1946 return b'null'
1947 1947 else:
1948 1948 return h
1949 1949
1950 1950 def printrecords(version):
1951 1951 ui.writenoi18n(b'* version %d records\n' % version)
1952 1952 if version == 1:
1953 1953 records = v1records
1954 1954 else:
1955 1955 records = v2records
1956 1956
1957 1957 for rtype, record in records:
1958 1958 # pretty print some record types
1959 1959 if rtype == b'L':
1960 1960 ui.writenoi18n(b'local: %s\n' % record)
1961 1961 elif rtype == b'O':
1962 1962 ui.writenoi18n(b'other: %s\n' % record)
1963 1963 elif rtype == b'm':
1964 1964 driver, mdstate = record.split(b'\0', 1)
1965 1965 ui.writenoi18n(
1966 1966 b'merge driver: %s (state "%s")\n' % (driver, mdstate)
1967 1967 )
1968 1968 elif rtype in b'FDC':
1969 1969 r = record.split(b'\0')
1970 1970 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1971 1971 if version == 1:
1972 1972 onode = b'not stored in v1 format'
1973 1973 flags = r[7]
1974 1974 else:
1975 1975 onode, flags = r[7:9]
1976 1976 ui.writenoi18n(
1977 1977 b'file: %s (record type "%s", state "%s", hash %s)\n'
1978 1978 % (f, rtype, state, _hashornull(hash))
1979 1979 )
1980 1980 ui.writenoi18n(
1981 1981 b' local path: %s (flags "%s")\n' % (lfile, flags)
1982 1982 )
1983 1983 ui.writenoi18n(
1984 1984 b' ancestor path: %s (node %s)\n'
1985 1985 % (afile, _hashornull(anode))
1986 1986 )
1987 1987 ui.writenoi18n(
1988 1988 b' other path: %s (node %s)\n'
1989 1989 % (ofile, _hashornull(onode))
1990 1990 )
1991 1991 elif rtype == b'f':
1992 1992 filename, rawextras = record.split(b'\0', 1)
1993 1993 extras = rawextras.split(b'\0')
1994 1994 i = 0
1995 1995 extrastrings = []
1996 1996 while i < len(extras):
1997 1997 extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
1998 1998 i += 2
1999 1999
2000 2000 ui.writenoi18n(
2001 2001 b'file extras: %s (%s)\n'
2002 2002 % (filename, b', '.join(extrastrings))
2003 2003 )
2004 2004 elif rtype == b'l':
2005 2005 labels = record.split(b'\0', 2)
2006 2006 labels = [l for l in labels if len(l) > 0]
2007 2007 ui.writenoi18n(b'labels:\n')
2008 2008 ui.write((b' local: %s\n' % labels[0]))
2009 2009 ui.write((b' other: %s\n' % labels[1]))
2010 2010 if len(labels) > 2:
2011 2011 ui.write((b' base: %s\n' % labels[2]))
2012 2012 else:
2013 2013 ui.writenoi18n(
2014 2014 b'unrecognized entry: %s\t%s\n'
2015 2015 % (rtype, record.replace(b'\0', b'\t'))
2016 2016 )
2017 2017
2018 2018 # Avoid mergestate.read() since it may raise an exception for unsupported
2019 2019 # merge state records. We shouldn't be doing this, but this is OK since this
2020 2020 # command is pretty low-level.
2021 2021 ms = mergemod.mergestate(repo)
2022 2022
2023 2023 # sort so that reasonable information is on top
2024 2024 v1records = ms._readrecordsv1()
2025 2025 v2records = ms._readrecordsv2()
2026 2026 order = b'LOml'
2027 2027
2028 2028 def key(r):
2029 2029 idx = order.find(r[0])
2030 2030 if idx == -1:
2031 2031 return (1, r[1])
2032 2032 else:
2033 2033 return (0, idx)
2034 2034
2035 2035 v1records.sort(key=key)
2036 2036 v2records.sort(key=key)
2037 2037
2038 2038 if not v1records and not v2records:
2039 2039 ui.writenoi18n(b'no merge state found\n')
2040 2040 elif not v2records:
2041 2041 ui.notenoi18n(b'no version 2 merge state\n')
2042 2042 printrecords(1)
2043 2043 elif ms._v1v2match(v1records, v2records):
2044 2044 ui.notenoi18n(b'v1 and v2 states match: using v2\n')
2045 2045 printrecords(2)
2046 2046 else:
2047 2047 ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
2048 2048 printrecords(1)
2049 2049 if ui.verbose:
2050 2050 printrecords(2)
2051 2051
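# Illustrative sketch (editor's addition): the sort key above pins the
# well-known record types to a fixed order (L, O, m, l) and pushes any
# unrecognized type after them, ordered by payload. Standalone version:
def _example_sort_records(records, order=b'LOml'):
    def key(r):
        idx = order.find(r[0])
        # (0, idx) keeps known types first, in `order`; (1, payload)
        # groups unknown record types after them.
        return (1, r[1]) if idx == -1 else (0, idx)
    return sorted(records, key=key)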
2052 2052
2053 2053 @command(b'debugnamecomplete', [], _(b'NAME...'))
2054 2054 def debugnamecomplete(ui, repo, *args):
2055 2055 '''complete "names" - tags, open branch names, bookmark names'''
2056 2056
2057 2057 names = set()
2058 2058 # since we previously only listed open branches, we will handle that
2059 2059 # specially (after this for loop)
2060 2060 for name, ns in pycompat.iteritems(repo.names):
2061 2061 if name != b'branches':
2062 2062 names.update(ns.listnames(repo))
2063 2063 names.update(
2064 2064 tag
2065 2065 for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
2066 2066 if not closed
2067 2067 )
2068 2068 completions = set()
2069 2069 if not args:
2070 2070 args = [b'']
2071 2071 for a in args:
2072 2072 completions.update(n for n in names if n.startswith(a))
2073 2073 ui.write(b'\n'.join(sorted(completions)))
2074 2074 ui.write(b'\n')
2075 2075
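# Illustrative sketch (editor's addition): completion is plain prefix
# matching over the collected name set; an empty argument matches all.
def _example_complete_names(names, args):
    args = args or ['']            # no argument: complete everything
    out = set()
    for a in args:
        out.update(n for n in names if n.startswith(a))
    return sorted(out)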
2076 2076
2077 2077 @command(
2078 2078 b'debugobsolete',
2079 2079 [
2080 2080 (b'', b'flags', 0, _(b'markers flag')),
2081 2081 (
2082 2082 b'',
2083 2083 b'record-parents',
2084 2084 False,
2085 2085 _(b'record parent information for the precursor'),
2086 2086 ),
2087 2087 (b'r', b'rev', [], _(b'display markers relevant to REV')),
2088 2088 (
2089 2089 b'',
2090 2090 b'exclusive',
2091 2091 False,
2092 2092 _(b'restrict display to markers only relevant to REV'),
2093 2093 ),
2094 2094 (b'', b'index', False, _(b'display index of the marker')),
2095 2095 (b'', b'delete', [], _(b'delete markers specified by indices')),
2096 2096 ]
2097 2097 + cmdutil.commitopts2
2098 2098 + cmdutil.formatteropts,
2099 2099 _(b'[OBSOLETED [REPLACEMENT ...]]'),
2100 2100 )
2101 2101 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
2102 2102 """create arbitrary obsolete marker
2103 2103
2104 2104 With no arguments, displays the list of obsolescence markers."""
2105 2105
2106 2106 opts = pycompat.byteskwargs(opts)
2107 2107
2108 2108 def parsenodeid(s):
2109 2109 try:
2110 2110 # We do not use revsingle/revrange functions here to accept
2111 2111 # arbitrary node identifiers, possibly not present in the
2112 2112 # local repository.
2113 2113 n = bin(s)
2114 2114 if len(n) != len(nullid):
2115 2115 raise TypeError()
2116 2116 return n
2117 2117 except TypeError:
2118 2118 raise error.Abort(
2119 2119 b'changeset references must be full hexadecimal '
2120 2120 b'node identifiers'
2121 2121 )
2122 2122
2123 2123 if opts.get(b'delete'):
2124 2124 indices = []
2125 2125 for v in opts.get(b'delete'):
2126 2126 try:
2127 2127 indices.append(int(v))
2128 2128 except ValueError:
2129 2129 raise error.Abort(
2130 2130 _(b'invalid index value: %r') % v,
2131 2131 hint=_(b'use integers for indices'),
2132 2132 )
2133 2133
2134 2134 if repo.currenttransaction():
2135 2135 raise error.Abort(
2136 2136 _(b'cannot delete obsmarkers in the middle of a transaction.')
2137 2137 )
2138 2138
2139 2139 with repo.lock():
2140 2140 n = repair.deleteobsmarkers(repo.obsstore, indices)
2141 2141 ui.write(_(b'deleted %i obsolescence markers\n') % n)
2142 2142
2143 2143 return
2144 2144
2145 2145 if precursor is not None:
2146 2146 if opts[b'rev']:
2147 2147 raise error.Abort(b'cannot select revision when creating marker')
2148 2148 metadata = {}
2149 2149 metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
2150 2150 succs = tuple(parsenodeid(succ) for succ in successors)
2151 2151 l = repo.lock()
2152 2152 try:
2153 2153 tr = repo.transaction(b'debugobsolete')
2154 2154 try:
2155 2155 date = opts.get(b'date')
2156 2156 if date:
2157 2157 date = dateutil.parsedate(date)
2158 2158 else:
2159 2159 date = None
2160 2160 prec = parsenodeid(precursor)
2161 2161 parents = None
2162 2162 if opts[b'record_parents']:
2163 2163 if prec not in repo.unfiltered():
2164 2164 raise error.Abort(
2165 2165 b'cannot use --record-parents on '
2166 2166 b'unknown changesets'
2167 2167 )
2168 2168 parents = repo.unfiltered()[prec].parents()
2169 2169 parents = tuple(p.node() for p in parents)
2170 2170 repo.obsstore.create(
2171 2171 tr,
2172 2172 prec,
2173 2173 succs,
2174 2174 opts[b'flags'],
2175 2175 parents=parents,
2176 2176 date=date,
2177 2177 metadata=metadata,
2178 2178 ui=ui,
2179 2179 )
2180 2180 tr.close()
2181 2181 except ValueError as exc:
2182 2182 raise error.Abort(
2183 2183 _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
2184 2184 )
2185 2185 finally:
2186 2186 tr.release()
2187 2187 finally:
2188 2188 l.release()
2189 2189 else:
2190 2190 if opts[b'rev']:
2191 2191 revs = scmutil.revrange(repo, opts[b'rev'])
2192 2192 nodes = [repo[r].node() for r in revs]
2193 2193 markers = list(
2194 2194 obsutil.getmarkers(
2195 2195 repo, nodes=nodes, exclusive=opts[b'exclusive']
2196 2196 )
2197 2197 )
2198 2198 markers.sort(key=lambda x: x._data)
2199 2199 else:
2200 2200 markers = obsutil.getmarkers(repo)
2201 2201
2202 2202 markerstoiter = markers
2203 2203 isrelevant = lambda m: True
2204 2204 if opts.get(b'rev') and opts.get(b'index'):
2205 2205 markerstoiter = obsutil.getmarkers(repo)
2206 2206 markerset = set(markers)
2207 2207 isrelevant = lambda m: m in markerset
2208 2208
2209 2209 fm = ui.formatter(b'debugobsolete', opts)
2210 2210 for i, m in enumerate(markerstoiter):
2211 2211 if not isrelevant(m):
2212 2212 # a marker can be irrelevant when we're iterating over a set
2213 2213 # of markers (markerstoiter) which is bigger than the set
2214 2214 # of markers we want to display (markers).
2215 2215 # This can happen when both --index and --rev are provided:
2216 2216 # we then need to iterate over all of the markers to get the
2217 2217 # correct indices, but only display the ones relevant to the
2218 2218 # --rev value.
2219 2219 continue
2220 2220 fm.startitem()
2221 2221 ind = i if opts.get(b'index') else None
2222 2222 cmdutil.showmarker(fm, m, index=ind)
2223 2223 fm.end()
2224 2224
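# Illustrative sketch (editor's addition): parsenodeid() above accepts
# only full 40-character hexadecimal identifiers so that markers may
# reference changesets absent from the local repository. With stdlib only:
import binascii

def _example_parse_full_node(s, nodelen=20):
    n = binascii.unhexlify(s)  # raises binascii.Error on non-hex input
    if len(n) != nodelen:      # reject abbreviated node identifiers
        raise ValueError('need a full hexadecimal node identifier')
    return n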
2225 2225
2226 2226 @command(
2227 2227 b'debugp1copies',
2228 2228 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2229 2229 _(b'[-r REV]'),
2230 2230 )
2231 2231 def debugp1copies(ui, repo, **opts):
2232 2232 """dump copy information compared to p1"""
2233 2233
2234 2234 opts = pycompat.byteskwargs(opts)
2235 2235 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2236 2236 for dst, src in ctx.p1copies().items():
2237 2237 ui.write(b'%s -> %s\n' % (src, dst))
2238 2238
2239 2239
2240 2240 @command(
2241 2241 b'debugp2copies',
2242 2242 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2243 2243 _(b'[-r REV]'),
2244 2244 )
2245 2245 def debugp2copies(ui, repo, **opts):
2246 2246 """dump copy information compared to p2"""
2247 2247
2248 2248 opts = pycompat.byteskwargs(opts)
2249 2249 ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
2250 2250 for dst, src in ctx.p2copies().items():
2251 2251 ui.write(b'%s -> %s\n' % (src, dst))
2252 2252
2253 2253
2254 2254 @command(
2255 2255 b'debugpathcomplete',
2256 2256 [
2257 2257 (b'f', b'full', None, _(b'complete an entire path')),
2258 2258 (b'n', b'normal', None, _(b'show only normal files')),
2259 2259 (b'a', b'added', None, _(b'show only added files')),
2260 2260 (b'r', b'removed', None, _(b'show only removed files')),
2261 2261 ],
2262 2262 _(b'FILESPEC...'),
2263 2263 )
2264 2264 def debugpathcomplete(ui, repo, *specs, **opts):
2265 2265 '''complete part or all of a tracked path
2266 2266
2267 2267 This command supports shells that offer path name completion. It
2268 2268 currently completes only files already known to the dirstate.
2269 2269
2270 2270 Completion extends only to the next path segment unless
2271 2271 --full is specified, in which case entire paths are used.'''
2272 2272
2273 2273 def complete(path, acceptable):
2274 2274 dirstate = repo.dirstate
2275 2275 spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
2276 2276 rootdir = repo.root + pycompat.ossep
2277 2277 if spec != repo.root and not spec.startswith(rootdir):
2278 2278 return [], []
2279 2279 if os.path.isdir(spec):
2280 2280 spec += b'/'
2281 2281 spec = spec[len(rootdir) :]
2282 2282 fixpaths = pycompat.ossep != b'/'
2283 2283 if fixpaths:
2284 2284 spec = spec.replace(pycompat.ossep, b'/')
2285 2285 speclen = len(spec)
2286 2286 fullpaths = opts['full']
2287 2287 files, dirs = set(), set()
2288 2288 adddir, addfile = dirs.add, files.add
2289 2289 for f, st in pycompat.iteritems(dirstate):
2290 2290 if f.startswith(spec) and st[0] in acceptable:
2291 2291 if fixpaths:
2292 2292 f = f.replace(b'/', pycompat.ossep)
2293 2293 if fullpaths:
2294 2294 addfile(f)
2295 2295 continue
2296 2296 s = f.find(pycompat.ossep, speclen)
2297 2297 if s >= 0:
2298 2298 adddir(f[:s])
2299 2299 else:
2300 2300 addfile(f)
2301 2301 return files, dirs
2302 2302
2303 2303 acceptable = b''
2304 2304 if opts['normal']:
2305 2305 acceptable += b'nm'
2306 2306 if opts['added']:
2307 2307 acceptable += b'a'
2308 2308 if opts['removed']:
2309 2309 acceptable += b'r'
2310 2310 cwd = repo.getcwd()
2311 2311 if not specs:
2312 2312 specs = [b'.']
2313 2313
2314 2314 files, dirs = set(), set()
2315 2315 for spec in specs:
2316 2316 f, d = complete(spec, acceptable or b'nmar')
2317 2317 files.update(f)
2318 2318 dirs.update(d)
2319 2319 files.update(dirs)
2320 2320 ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
2321 2321 ui.write(b'\n')
2322 2322
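# Illustrative sketch (editor's addition): the completion loop above
# either returns whole tracked paths (--full) or cuts each match at the
# next separator after the typed prefix, yielding directory completions.
def _example_complete_paths(prefix, tracked, sep='/', full=False):
    files, dirs = set(), set()
    for f in tracked:
        if not f.startswith(prefix):
            continue
        if full:
            files.add(f)
            continue
        s = f.find(sep, len(prefix))
        if s >= 0:
            dirs.add(f[:s])        # complete only up to the next segment
        else:
            files.add(f)
    return files, dirs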
2323 2323
2324 2324 @command(
2325 2325 b'debugpathcopies',
2326 2326 cmdutil.walkopts,
2327 2327 b'hg debugpathcopies REV1 REV2 [FILE]',
2328 2328 inferrepo=True,
2329 2329 )
2330 2330 def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
2331 2331 """show copies between two revisions"""
2332 2332 ctx1 = scmutil.revsingle(repo, rev1)
2333 2333 ctx2 = scmutil.revsingle(repo, rev2)
2334 2334 m = scmutil.match(ctx1, pats, opts)
2335 2335 for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
2336 2336 ui.write(b'%s -> %s\n' % (src, dst))
2337 2337
2338 2338
2339 2339 @command(b'debugpeer', [], _(b'PATH'), norepo=True)
2340 2340 def debugpeer(ui, path):
2341 2341 """establish a connection to a peer repository"""
2342 2342 # Always enable peer request logging. Requires --debug to display
2343 2343 # though.
2344 2344 overrides = {
2345 2345 (b'devel', b'debug.peer-request'): True,
2346 2346 }
2347 2347
2348 2348 with ui.configoverride(overrides):
2349 2349 peer = hg.peer(ui, {}, path)
2350 2350
2351 2351 local = peer.local() is not None
2352 2352 canpush = peer.canpush()
2353 2353
2354 2354 ui.write(_(b'url: %s\n') % peer.url())
2355 2355 ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
2356 2356 ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))
2357 2357
2358 2358
2359 2359 @command(
2360 2360 b'debugpickmergetool',
2361 2361 [
2362 2362 (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
2363 2363 (b'', b'changedelete', None, _(b'emulate merging change and delete')),
2364 2364 ]
2365 2365 + cmdutil.walkopts
2366 2366 + cmdutil.mergetoolopts,
2367 2367 _(b'[PATTERN]...'),
2368 2368 inferrepo=True,
2369 2369 )
2370 2370 def debugpickmergetool(ui, repo, *pats, **opts):
2371 2371 """examine which merge tool is chosen for specified file
2372 2372
2373 2373 As described in :hg:`help merge-tools`, Mercurial examines the
2374 2374 configurations below in this order to decide which merge tool is
2375 2375 chosen for the specified file.
2376 2376
2377 2377 1. ``--tool`` option
2378 2378 2. ``HGMERGE`` environment variable
2379 2379 3. configurations in the ``merge-patterns`` section
2380 2380 4. configuration of ``ui.merge``
2381 2381 5. configurations in the ``merge-tools`` section
2382 2382 6. ``hgmerge`` tool (for historical reasons only)
2383 2383 7. default tool for fallback (``:merge`` or ``:prompt``)
2384 2384
2385 2385 This command writes out the examination result in the style below::
2386 2386
2387 2387 FILE = MERGETOOL
2388 2388
2389 2389 By default, all files known in the first parent context of the
2390 2390 working directory are examined. Use file patterns and/or -I/-X
2391 2391 options to limit target files. -r/--rev is also useful to examine
2392 2392 files in another context without actually updating to it.
2393 2393
2394 2394 With --debug, this command also shows warning messages emitted
2395 2395 while matching against ``merge-patterns`` and so on. It is
2396 2396 recommended to use this option with explicit file patterns and/or
2397 2397 -I/-X options, because it increases the amount of output per file
2398 2398 according to the configuration in hgrc.
2399 2399
2400 2400 With -v/--verbose, this command first shows the configurations
2401 2401 below (only those actually specified).
2402 2402
2403 2403 - ``--tool`` option
2404 2404 - ``HGMERGE`` environment variable
2405 2405 - configuration of ``ui.merge``
2406 2406
2407 2407 If a merge tool is chosen before matching against
2408 2408 ``merge-patterns``, this command can't show any helpful
2409 2409 information, even with --debug. In such a case, the information
2410 2410 above is useful for understanding why a merge tool was chosen.
2411 2411 """
2412 2412 opts = pycompat.byteskwargs(opts)
2413 2413 overrides = {}
2414 2414 if opts[b'tool']:
2415 2415 overrides[(b'ui', b'forcemerge')] = opts[b'tool']
2416 2416 ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))
2417 2417
2418 2418 with ui.configoverride(overrides, b'debugmergepatterns'):
2419 2419 hgmerge = encoding.environ.get(b"HGMERGE")
2420 2420 if hgmerge is not None:
2421 2421 ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
2422 2422 uimerge = ui.config(b"ui", b"merge")
2423 2423 if uimerge:
2424 2424 ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))
2425 2425
2426 2426 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2427 2427 m = scmutil.match(ctx, pats, opts)
2428 2428 changedelete = opts[b'changedelete']
2429 2429 for path in ctx.walk(m):
2430 2430 fctx = ctx[path]
2431 2431 try:
2432 2432 if not ui.debugflag:
2433 2433 ui.pushbuffer(error=True)
2434 2434 tool, toolpath = filemerge._picktool(
2435 2435 repo,
2436 2436 ui,
2437 2437 path,
2438 2438 fctx.isbinary(),
2439 2439 b'l' in fctx.flags(),
2440 2440 changedelete,
2441 2441 )
2442 2442 finally:
2443 2443 if not ui.debugflag:
2444 2444 ui.popbuffer()
2445 2445 ui.write(b'%s = %s\n' % (path, tool))
2446 2446
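# Illustrative sketch (editor's addition, hypothetical helper): the
# decision order documented above is a first-match-wins chain; this is a
# schematic, not the real filemerge._picktool() logic.
def _example_pick_tool(cli_tool, env_hgmerge, pattern_tool, ui_merge):
    for tool in (cli_tool, env_hgmerge, pattern_tool, ui_merge):
        if tool:                   # earlier sources shadow later ones
            return tool
    return ':merge'                # fallback default tool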
2447 2447
2448 2448 @command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
2449 2449 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
2450 2450 '''access the pushkey key/value protocol
2451 2451
2452 2452 With two args, list the keys in the given namespace.
2453 2453
2454 2454 With five args, set a key to new if it currently is set to old.
2455 2455 Reports success or failure.
2456 2456 '''
2457 2457
2458 2458 target = hg.peer(ui, {}, repopath)
2459 2459 if keyinfo:
2460 2460 key, old, new = keyinfo
2461 2461 with target.commandexecutor() as e:
2462 2462 r = e.callcommand(
2463 2463 b'pushkey',
2464 2464 {
2465 2465 b'namespace': namespace,
2466 2466 b'key': key,
2467 2467 b'old': old,
2468 2468 b'new': new,
2469 2469 },
2470 2470 ).result()
2471 2471
2472 2472 ui.status(pycompat.bytestr(r) + b'\n')
2473 2473 return not r
2474 2474 else:
2475 2475 for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
2476 2476 ui.write(
2477 2477 b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
2478 2478 )
2479 2479
2480 2480
2481 2481 @command(b'debugpvec', [], _(b'A B'))
2482 2482 def debugpvec(ui, repo, a, b=None):
2483 2483 ca = scmutil.revsingle(repo, a)
2484 2484 cb = scmutil.revsingle(repo, b)
2485 2485 pa = pvec.ctxpvec(ca)
2486 2486 pb = pvec.ctxpvec(cb)
2487 2487 if pa == pb:
2488 2488 rel = b"="
2489 2489 elif pa > pb:
2490 2490 rel = b">"
2491 2491 elif pa < pb:
2492 2492 rel = b"<"
2493 2493 elif pa | pb:
2494 2494 rel = b"|"
2495 2495 ui.write(_(b"a: %s\n") % pa)
2496 2496 ui.write(_(b"b: %s\n") % pb)
2497 2497 ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
2498 2498 ui.write(
2499 2499 _(b"delta: %d hdist: %d distance: %d relation: %s\n")
2500 2500 % (
2501 2501 abs(pa._depth - pb._depth),
2502 2502 pvec._hamming(pa._vec, pb._vec),
2503 2503 pa.distance(pb),
2504 2504 rel,
2505 2505 )
2506 2506 )
2507 2507
2508 2508
2509 2509 @command(
2510 2510 b'debugrebuilddirstate|debugrebuildstate',
2511 2511 [
2512 2512 (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
2513 2513 (
2514 2514 b'',
2515 2515 b'minimal',
2516 2516 None,
2517 2517 _(
2518 2518 b'only rebuild files that are inconsistent with '
2519 2519 b'the working copy parent'
2520 2520 ),
2521 2521 ),
2522 2522 ],
2523 2523 _(b'[-r REV]'),
2524 2524 )
2525 2525 def debugrebuilddirstate(ui, repo, rev, **opts):
2526 2526 """rebuild the dirstate as it would look like for the given revision
2527 2527
2528 2528 If no revision is specified, the first parent of the working directory is used.
2529 2529
2530 2530 The dirstate will be set to the files of the given revision.
2531 2531 The actual working directory content or existing dirstate
2532 2532 information such as adds or removes is not considered.
2533 2533
2534 2534 ``minimal`` will only rebuild the dirstate status for files that claim to be
2535 2535 tracked but are not in the parent manifest, or that exist in the parent
2536 2536 manifest but are not in the dirstate. It will not change adds, removes, or
2537 2537 modified files that are in the working copy parent.
2538 2538
2539 2539 One use of this command is to make the next :hg:`status` invocation
2540 2540 check the actual file content.
2541 2541 """
2542 2542 ctx = scmutil.revsingle(repo, rev)
2543 2543 with repo.wlock():
2544 2544 dirstate = repo.dirstate
2545 2545 changedfiles = None
2546 2546 # See command doc for what minimal does.
2547 2547 if opts.get('minimal'):
2548 2548 manifestfiles = set(ctx.manifest().keys())
2549 2549 dirstatefiles = set(dirstate)
2550 2550 manifestonly = manifestfiles - dirstatefiles
2551 2551 dsonly = dirstatefiles - manifestfiles
2552 2552 dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
2553 2553 changedfiles = manifestonly | dsnotadded
2554 2554
2555 2555 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2556 2556
2557 2557
2558 2558 @command(b'debugrebuildfncache', [], b'')
2559 2559 def debugrebuildfncache(ui, repo):
2560 2560 """rebuild the fncache file"""
2561 2561 repair.rebuildfncache(ui, repo)
2562 2562
2563 2563
2564 2564 @command(
2565 2565 b'debugrename',
2566 2566 [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
2567 2567 _(b'[-r REV] [FILE]...'),
2568 2568 )
2569 2569 def debugrename(ui, repo, *pats, **opts):
2570 2570 """dump rename information"""
2571 2571
2572 2572 opts = pycompat.byteskwargs(opts)
2573 2573 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
2574 2574 m = scmutil.match(ctx, pats, opts)
2575 2575 for abs in ctx.walk(m):
2576 2576 fctx = ctx[abs]
2577 2577 o = fctx.filelog().renamed(fctx.filenode())
2578 2578 rel = repo.pathto(abs)
2579 2579 if o:
2580 2580 ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
2581 2581 else:
2582 2582 ui.write(_(b"%s not renamed\n") % rel)
2583 2583
2584 2584
2585 2585 @command(
2586 2586 b'debugrevlog',
2587 2587 cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
2588 2588 _(b'-c|-m|FILE'),
2589 2589 optionalrepo=True,
2590 2590 )
2591 2591 def debugrevlog(ui, repo, file_=None, **opts):
2592 2592 """show data and statistics about a revlog"""
2593 2593 opts = pycompat.byteskwargs(opts)
2594 2594 r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)
2595 2595
2596 2596 if opts.get(b"dump"):
2597 2597 numrevs = len(r)
2598 2598 ui.write(
2599 2599 (
2600 2600 b"# rev p1rev p2rev start end deltastart base p1 p2"
2601 2601 b" rawsize totalsize compression heads chainlen\n"
2602 2602 )
2603 2603 )
2604 2604 ts = 0
2605 2605 heads = set()
2606 2606
2607 2607 for rev in pycompat.xrange(numrevs):
2608 2608 dbase = r.deltaparent(rev)
2609 2609 if dbase == -1:
2610 2610 dbase = rev
2611 2611 cbase = r.chainbase(rev)
2612 2612 clen = r.chainlen(rev)
2613 2613 p1, p2 = r.parentrevs(rev)
2614 2614 rs = r.rawsize(rev)
2615 2615 ts = ts + rs
2616 2616 heads -= set(r.parentrevs(rev))
2617 2617 heads.add(rev)
2618 2618 try:
2619 2619 compression = ts / r.end(rev)
2620 2620 except ZeroDivisionError:
2621 2621 compression = 0
2622 2622 ui.write(
2623 2623 b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
2624 2624 b"%11d %5d %8d\n"
2625 2625 % (
2626 2626 rev,
2627 2627 p1,
2628 2628 p2,
2629 2629 r.start(rev),
2630 2630 r.end(rev),
2631 2631 r.start(dbase),
2632 2632 r.start(cbase),
2633 2633 r.start(p1),
2634 2634 r.start(p2),
2635 2635 rs,
2636 2636 ts,
2637 2637 compression,
2638 2638 len(heads),
2639 2639 clen,
2640 2640 )
2641 2641 )
2642 2642 return 0
2643 2643
2644 2644 v = r.version
2645 2645 format = v & 0xFFFF
2646 2646 flags = []
2647 2647 gdelta = False
2648 2648 if v & revlog.FLAG_INLINE_DATA:
2649 2649 flags.append(b'inline')
2650 2650 if v & revlog.FLAG_GENERALDELTA:
2651 2651 gdelta = True
2652 2652 flags.append(b'generaldelta')
2653 2653 if not flags:
2654 2654 flags = [b'(none)']
2655 2655
2656 2656 ### tracks merge vs single parent
2657 2657 nummerges = 0
2658 2658
2659 2659 ### tracks the ways the "delta" entries are built
2660 2660 # nodelta
2661 2661 numempty = 0
2662 2662 numemptytext = 0
2663 2663 numemptydelta = 0
2664 2664 # full file content
2665 2665 numfull = 0
2666 2666 # intermediate snapshot against a prior snapshot
2667 2667 numsemi = 0
2668 2668 # snapshot count per depth
2669 2669 numsnapdepth = collections.defaultdict(lambda: 0)
2670 2670 # delta against previous revision
2671 2671 numprev = 0
2672 2672 # delta against first or second parent (not prev)
2673 2673 nump1 = 0
2674 2674 nump2 = 0
2675 2675 # delta against neither prev nor parents
2676 2676 numother = 0
2677 2677 # delta against prev that are also first or second parent
2678 2678 # (details of `numprev`)
2679 2679 nump1prev = 0
2680 2680 nump2prev = 0
2681 2681
2682 2682 # data about delta chain of each revs
2683 2683 chainlengths = []
2684 2684 chainbases = []
2685 2685 chainspans = []
2686 2686
2687 2687 # data about each revision
2688 2688 datasize = [None, 0, 0]
2689 2689 fullsize = [None, 0, 0]
2690 2690 semisize = [None, 0, 0]
2691 2691 # snapshot count per depth
2692 2692 snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
2693 2693 deltasize = [None, 0, 0]
2694 2694 chunktypecounts = {}
2695 2695 chunktypesizes = {}
2696 2696
2697 2697 def addsize(size, l):
2698 2698 if l[0] is None or size < l[0]:
2699 2699 l[0] = size
2700 2700 if size > l[1]:
2701 2701 l[1] = size
2702 2702 l[2] += size
2703 2703
2704 2704 numrevs = len(r)
2705 2705 for rev in pycompat.xrange(numrevs):
2706 2706 p1, p2 = r.parentrevs(rev)
2707 2707 delta = r.deltaparent(rev)
2708 2708 if format > 0:
2709 2709 addsize(r.rawsize(rev), datasize)
2710 2710 if p2 != nullrev:
2711 2711 nummerges += 1
2712 2712 size = r.length(rev)
2713 2713 if delta == nullrev:
2714 2714 chainlengths.append(0)
2715 2715 chainbases.append(r.start(rev))
2716 2716 chainspans.append(size)
2717 2717 if size == 0:
2718 2718 numempty += 1
2719 2719 numemptytext += 1
2720 2720 else:
2721 2721 numfull += 1
2722 2722 numsnapdepth[0] += 1
2723 2723 addsize(size, fullsize)
2724 2724 addsize(size, snapsizedepth[0])
2725 2725 else:
2726 2726 chainlengths.append(chainlengths[delta] + 1)
2727 2727 baseaddr = chainbases[delta]
2728 2728 revaddr = r.start(rev)
2729 2729 chainbases.append(baseaddr)
2730 2730 chainspans.append((revaddr - baseaddr) + size)
2731 2731 if size == 0:
2732 2732 numempty += 1
2733 2733 numemptydelta += 1
2734 2734 elif r.issnapshot(rev):
2735 2735 addsize(size, semisize)
2736 2736 numsemi += 1
2737 2737 depth = r.snapshotdepth(rev)
2738 2738 numsnapdepth[depth] += 1
2739 2739 addsize(size, snapsizedepth[depth])
2740 2740 else:
2741 2741 addsize(size, deltasize)
2742 2742 if delta == rev - 1:
2743 2743 numprev += 1
2744 2744 if delta == p1:
2745 2745 nump1prev += 1
2746 2746 elif delta == p2:
2747 2747 nump2prev += 1
2748 2748 elif delta == p1:
2749 2749 nump1 += 1
2750 2750 elif delta == p2:
2751 2751 nump2 += 1
2752 2752 elif delta != nullrev:
2753 2753 numother += 1
2754 2754
2755 2755 # Obtain data on the raw chunks in the revlog.
2756 2756 if util.safehasattr(r, b'_getsegmentforrevs'):
2757 2757 segment = r._getsegmentforrevs(rev, rev)[1]
2758 2758 else:
2759 2759 segment = r._revlog._getsegmentforrevs(rev, rev)[1]
2760 2760 if segment:
2761 2761 chunktype = bytes(segment[0:1])
2762 2762 else:
2763 2763 chunktype = b'empty'
2764 2764
2765 2765 if chunktype not in chunktypecounts:
2766 2766 chunktypecounts[chunktype] = 0
2767 2767 chunktypesizes[chunktype] = 0
2768 2768
2769 2769 chunktypecounts[chunktype] += 1
2770 2770 chunktypesizes[chunktype] += size
2771 2771
2772 2772 # Adjust size min value for empty cases
2773 2773 for size in (datasize, fullsize, semisize, deltasize):
2774 2774 if size[0] is None:
2775 2775 size[0] = 0
2776 2776
2777 2777 numdeltas = numrevs - numfull - numempty - numsemi
2778 2778 numoprev = numprev - nump1prev - nump2prev
2779 2779 totalrawsize = datasize[2]
2780 2780 datasize[2] /= numrevs
2781 2781 fulltotal = fullsize[2]
2782 2782 if numfull == 0:
2783 2783 fullsize[2] = 0
2784 2784 else:
2785 2785 fullsize[2] /= numfull
2786 2786 semitotal = semisize[2]
2787 2787 snaptotal = {}
2788 2788 if numsemi > 0:
2789 2789 semisize[2] /= numsemi
2790 2790 for depth in snapsizedepth:
2791 2791 snaptotal[depth] = snapsizedepth[depth][2]
2792 2792 snapsizedepth[depth][2] /= numsnapdepth[depth]
2793 2793
2794 2794 deltatotal = deltasize[2]
2795 2795 if numdeltas > 0:
2796 2796 deltasize[2] /= numdeltas
2797 2797 totalsize = fulltotal + semitotal + deltatotal
2798 2798 avgchainlen = sum(chainlengths) / numrevs
2799 2799 maxchainlen = max(chainlengths)
2800 2800 maxchainspan = max(chainspans)
2801 2801 compratio = 1
2802 2802 if totalsize:
2803 2803 compratio = totalrawsize / totalsize
2804 2804
2805 2805 basedfmtstr = b'%%%dd\n'
2806 2806 basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'
2807 2807
2808 2808 def dfmtstr(max):
2809 2809 return basedfmtstr % len(str(max))
2810 2810
2811 2811 def pcfmtstr(max, padding=0):
2812 2812 return basepcfmtstr % (len(str(max)), b' ' * padding)
2813 2813
2814 2814 def pcfmt(value, total):
2815 2815 if total:
2816 2816 return (value, 100 * float(value) / total)
2817 2817 else:
2818 2818 return value, 100.0
2819 2819
2820 2820 ui.writenoi18n(b'format : %d\n' % format)
2821 2821 ui.writenoi18n(b'flags : %s\n' % b', '.join(flags))
2822 2822
2823 2823 ui.write(b'\n')
2824 2824 fmt = pcfmtstr(totalsize)
2825 2825 fmt2 = dfmtstr(totalsize)
2826 2826 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2827 2827 ui.writenoi18n(b' merges : ' + fmt % pcfmt(nummerges, numrevs))
2828 2828 ui.writenoi18n(
2829 2829 b' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
2830 2830 )
2831 2831 ui.writenoi18n(b'revisions : ' + fmt2 % numrevs)
2832 2832 ui.writenoi18n(b' empty : ' + fmt % pcfmt(numempty, numrevs))
2833 2833 ui.writenoi18n(
2834 2834 b' text : '
2835 2835 + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
2836 2836 )
2837 2837 ui.writenoi18n(
2838 2838 b' delta : '
2839 2839 + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
2840 2840 )
2841 2841 ui.writenoi18n(
2842 2842 b' snapshot : ' + fmt % pcfmt(numfull + numsemi, numrevs)
2843 2843 )
2844 2844 for depth in sorted(numsnapdepth):
2845 2845 ui.write(
2846 2846 (b' lvl-%-3d : ' % depth)
2847 2847 + fmt % pcfmt(numsnapdepth[depth], numrevs)
2848 2848 )
2849 2849 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
2850 2850 ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
2851 2851 ui.writenoi18n(
2852 2852 b' snapshot : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
2853 2853 )
2854 2854 for depth in sorted(numsnapdepth):
2855 2855 ui.write(
2856 2856 (b' lvl-%-3d : ' % depth)
2857 2857 + fmt % pcfmt(snaptotal[depth], totalsize)
2858 2858 )
2859 2859 ui.writenoi18n(b' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
2860 2860
2861 2861 def fmtchunktype(chunktype):
2862 2862 if chunktype == b'empty':
2863 2863 return b' %s : ' % chunktype
2864 2864 elif chunktype in pycompat.bytestr(string.ascii_letters):
2865 2865 return b' 0x%s (%s) : ' % (hex(chunktype), chunktype)
2866 2866 else:
2867 2867 return b' 0x%s : ' % hex(chunktype)
2868 2868
2869 2869 ui.write(b'\n')
2870 2870 ui.writenoi18n(b'chunks : ' + fmt2 % numrevs)
2871 2871 for chunktype in sorted(chunktypecounts):
2872 2872 ui.write(fmtchunktype(chunktype))
2873 2873 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
2874 2874 ui.writenoi18n(b'chunks size : ' + fmt2 % totalsize)
2875 2875 for chunktype in sorted(chunktypecounts):
2876 2876 ui.write(fmtchunktype(chunktype))
2877 2877 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
2878 2878
2879 2879 ui.write(b'\n')
2880 2880 fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
2881 2881 ui.writenoi18n(b'avg chain length : ' + fmt % avgchainlen)
2882 2882 ui.writenoi18n(b'max chain length : ' + fmt % maxchainlen)
2883 2883 ui.writenoi18n(b'max chain reach : ' + fmt % maxchainspan)
2884 2884 ui.writenoi18n(b'compression ratio : ' + fmt % compratio)
2885 2885
2886 2886 if format > 0:
2887 2887 ui.write(b'\n')
2888 2888 ui.writenoi18n(
2889 2889 b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
2890 2890 % tuple(datasize)
2891 2891 )
2892 2892 ui.writenoi18n(
2893 2893 b'full revision size (min/max/avg) : %d / %d / %d\n'
2894 2894 % tuple(fullsize)
2895 2895 )
2896 2896 ui.writenoi18n(
2897 2897 b'inter-snapshot size (min/max/avg) : %d / %d / %d\n'
2898 2898 % tuple(semisize)
2899 2899 )
2900 2900 for depth in sorted(snapsizedepth):
2901 2901 if depth == 0:
2902 2902 continue
2903 2903 ui.writenoi18n(
2904 2904 b' level-%-3d (min/max/avg) : %d / %d / %d\n'
2905 2905 % ((depth,) + tuple(snapsizedepth[depth]))
2906 2906 )
2907 2907 ui.writenoi18n(
2908 2908 b'delta size (min/max/avg) : %d / %d / %d\n'
2909 2909 % tuple(deltasize)
2910 2910 )
2911 2911
2912 2912 if numdeltas > 0:
2913 2913 ui.write(b'\n')
2914 2914 fmt = pcfmtstr(numdeltas)
2915 2915 fmt2 = pcfmtstr(numdeltas, 4)
2916 2916 ui.writenoi18n(
2917 2917 b'deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)
2918 2918 )
2919 2919 if numprev > 0:
2920 2920 ui.writenoi18n(
2921 2921 b' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)
2922 2922 )
2923 2923 ui.writenoi18n(
2924 2924 b' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)
2925 2925 )
2926 2926 ui.writenoi18n(
2927 2927 b' other : ' + fmt2 % pcfmt(numoprev, numprev)
2928 2928 )
2929 2929 if gdelta:
2930 2930 ui.writenoi18n(
2931 2931 b'deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)
2932 2932 )
2933 2933 ui.writenoi18n(
2934 2934 b'deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)
2935 2935 )
2936 2936 ui.writenoi18n(
2937 2937 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
2938 2938 )
2939 2939
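# Illustrative sketch (editor's addition): the statistics loop above
# classifies every revision by its delta parent; deltas against the
# previous revision are additionally split by whether prev is also p1/p2.
def _example_classify_delta(rev, delta, p1, p2, nullrev=-1):
    if delta == nullrev:
        return 'full'              # snapshot, no delta parent
    if delta == rev - 1:
        return 'prev'              # delta against the previous revision
    if delta == p1:
        return 'p1'
    if delta == p2:
        return 'p2'
    return 'other'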
2940 2940
2941 2941 @command(
2942 2942 b'debugrevlogindex',
2943 2943 cmdutil.debugrevlogopts
2944 2944 + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
2945 2945 _(b'[-f FORMAT] -c|-m|FILE'),
2946 2946 optionalrepo=True,
2947 2947 )
2948 2948 def debugrevlogindex(ui, repo, file_=None, **opts):
2949 2949 """dump the contents of a revlog index"""
2950 2950 opts = pycompat.byteskwargs(opts)
2951 2951 r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
2952 2952 format = opts.get(b'format', 0)
2953 2953 if format not in (0, 1):
2954 2954 raise error.Abort(_(b"unknown format %d") % format)
2955 2955
2956 2956 if ui.debugflag:
2957 2957 shortfn = hex
2958 2958 else:
2959 2959 shortfn = short
2960 2960
2961 2961 # There might not be anything in r, so have a sane default
2962 2962 idlen = 12
2963 2963 for i in r:
2964 2964 idlen = len(shortfn(r.node(i)))
2965 2965 break
2966 2966
2967 2967 if format == 0:
2968 2968 if ui.verbose:
2969 2969 ui.writenoi18n(
2970 2970 b" rev offset length linkrev %s %s p2\n"
2971 2971 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
2972 2972 )
2973 2973 else:
2974 2974 ui.writenoi18n(
2975 2975 b" rev linkrev %s %s p2\n"
2976 2976 % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
2977 2977 )
2978 2978 elif format == 1:
2979 2979 if ui.verbose:
2980 2980 ui.writenoi18n(
2981 2981 (
2982 2982 b" rev flag offset length size link p1"
2983 2983 b" p2 %s\n"
2984 2984 )
2985 2985 % b"nodeid".rjust(idlen)
2986 2986 )
2987 2987 else:
2988 2988 ui.writenoi18n(
2989 2989 b" rev flag size link p1 p2 %s\n"
2990 2990 % b"nodeid".rjust(idlen)
2991 2991 )
2992 2992
2993 2993 for i in r:
2994 2994 node = r.node(i)
2995 2995 if format == 0:
2996 2996 try:
2997 2997 pp = r.parents(node)
2998 2998 except Exception:
2999 2999 pp = [nullid, nullid]
3000 3000 if ui.verbose:
3001 3001 ui.write(
3002 3002 b"% 6d % 9d % 7d % 7d %s %s %s\n"
3003 3003 % (
3004 3004 i,
3005 3005 r.start(i),
3006 3006 r.length(i),
3007 3007 r.linkrev(i),
3008 3008 shortfn(node),
3009 3009 shortfn(pp[0]),
3010 3010 shortfn(pp[1]),
3011 3011 )
3012 3012 )
3013 3013 else:
3014 3014 ui.write(
3015 3015 b"% 6d % 7d %s %s %s\n"
3016 3016 % (
3017 3017 i,
3018 3018 r.linkrev(i),
3019 3019 shortfn(node),
3020 3020 shortfn(pp[0]),
3021 3021 shortfn(pp[1]),
3022 3022 )
3023 3023 )
3024 3024 elif format == 1:
3025 3025 pr = r.parentrevs(i)
3026 3026 if ui.verbose:
3027 3027 ui.write(
3028 3028 b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
3029 3029 % (
3030 3030 i,
3031 3031 r.flags(i),
3032 3032 r.start(i),
3033 3033 r.length(i),
3034 3034 r.rawsize(i),
3035 3035 r.linkrev(i),
3036 3036 pr[0],
3037 3037 pr[1],
3038 3038 shortfn(node),
3039 3039 )
3040 3040 )
3041 3041 else:
3042 3042 ui.write(
3043 3043 b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
3044 3044 % (
3045 3045 i,
3046 3046 r.flags(i),
3047 3047 r.rawsize(i),
3048 3048 r.linkrev(i),
3049 3049 pr[0],
3050 3050 pr[1],
3051 3051 shortfn(node),
3052 3052 )
3053 3053 )
3054 3054
3055 3055
3056 3056 @command(
3057 3057 b'debugrevspec',
3058 3058 [
3059 3059 (
3060 3060 b'',
3061 3061 b'optimize',
3062 3062 None,
3063 3063 _(b'print parsed tree after optimizing (DEPRECATED)'),
3064 3064 ),
3065 3065 (
3066 3066 b'',
3067 3067 b'show-revs',
3068 3068 True,
3069 3069 _(b'print list of result revisions (default)'),
3070 3070 ),
3071 3071 (
3072 3072 b's',
3073 3073 b'show-set',
3074 3074 None,
3075 3075 _(b'print internal representation of result set'),
3076 3076 ),
3077 3077 (
3078 3078 b'p',
3079 3079 b'show-stage',
3080 3080 [],
3081 3081 _(b'print parsed tree at the given stage'),
3082 3082 _(b'NAME'),
3083 3083 ),
3084 3084 (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
3085 3085 (b'', b'verify-optimized', False, _(b'verify optimized result')),
3086 3086 ],
3087 3087 b'REVSPEC',
3088 3088 )
3089 3089 def debugrevspec(ui, repo, expr, **opts):
3090 3090 """parse and apply a revision specification
3091 3091
3092 3092 Use -p/--show-stage option to print the parsed tree at the given stages.
3093 3093 Use -p all to print tree at every stage.
3094 3094
3095 3095 Use --no-show-revs option with -s or -p to print only the set
3096 3096 representation or the parsed tree respectively.
3097 3097
3098 3098 Use --verify-optimized to compare the optimized result with the unoptimized
3099 3099 one. Returns 1 if the optimized result differs.
3100 3100 """
3101 3101 opts = pycompat.byteskwargs(opts)
3102 3102 aliases = ui.configitems(b'revsetalias')
3103 3103 stages = [
3104 3104 (b'parsed', lambda tree: tree),
3105 3105 (
3106 3106 b'expanded',
3107 3107 lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
3108 3108 ),
3109 3109 (b'concatenated', revsetlang.foldconcat),
3110 3110 (b'analyzed', revsetlang.analyze),
3111 3111 (b'optimized', revsetlang.optimize),
3112 3112 ]
3113 3113 if opts[b'no_optimized']:
3114 3114 stages = stages[:-1]
3115 3115 if opts[b'verify_optimized'] and opts[b'no_optimized']:
3116 3116 raise error.Abort(
3117 3117 _(b'cannot use --verify-optimized with --no-optimized')
3118 3118 )
3119 3119 stagenames = set(n for n, f in stages)
3120 3120
3121 3121 showalways = set()
3122 3122 showchanged = set()
3123 3123 if ui.verbose and not opts[b'show_stage']:
3124 3124 # show parsed tree by --verbose (deprecated)
3125 3125 showalways.add(b'parsed')
3126 3126 showchanged.update([b'expanded', b'concatenated'])
3127 3127 if opts[b'optimize']:
3128 3128 showalways.add(b'optimized')
3129 3129 if opts[b'show_stage'] and opts[b'optimize']:
3130 3130 raise error.Abort(_(b'cannot use --optimize with --show-stage'))
3131 3131 if opts[b'show_stage'] == [b'all']:
3132 3132 showalways.update(stagenames)
3133 3133 else:
3134 3134 for n in opts[b'show_stage']:
3135 3135 if n not in stagenames:
3136 3136 raise error.Abort(_(b'invalid stage name: %s') % n)
3137 3137 showalways.update(opts[b'show_stage'])
3138 3138
3139 3139 treebystage = {}
3140 3140 printedtree = None
3141 3141 tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
3142 3142 for n, f in stages:
3143 3143 treebystage[n] = tree = f(tree)
3144 3144 if n in showalways or (n in showchanged and tree != printedtree):
3145 3145 if opts[b'show_stage'] or n != b'parsed':
3146 3146 ui.write(b"* %s:\n" % n)
3147 3147 ui.write(revsetlang.prettyformat(tree), b"\n")
3148 3148 printedtree = tree
3149 3149
3150 3150 if opts[b'verify_optimized']:
3151 3151 arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
3152 3152 brevs = revset.makematcher(treebystage[b'optimized'])(repo)
3153 3153 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3154 3154 ui.writenoi18n(
3155 3155 b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
3156 3156 )
3157 3157 ui.writenoi18n(
3158 3158 b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
3159 3159 )
3160 3160 arevs = list(arevs)
3161 3161 brevs = list(brevs)
3162 3162 if arevs == brevs:
3163 3163 return 0
3164 3164 ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
3165 3165 ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
3166 3166 sm = difflib.SequenceMatcher(None, arevs, brevs)
3167 3167 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
3168 3168 if tag in ('delete', 'replace'):
3169 3169 for c in arevs[alo:ahi]:
3170 3170 ui.write(b'-%d\n' % c, label=b'diff.deleted')
3171 3171 if tag in ('insert', 'replace'):
3172 3172 for c in brevs[blo:bhi]:
3173 3173 ui.write(b'+%d\n' % c, label=b'diff.inserted')
3174 3174 if tag == 'equal':
3175 3175 for c in arevs[alo:ahi]:
3176 3176 ui.write(b' %d\n' % c)
3177 3177 return 1
3178 3178
3179 3179 func = revset.makematcher(tree)
3180 3180 revs = func(repo)
3181 3181 if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
3182 3182 ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
3183 3183 if not opts[b'show_revs']:
3184 3184 return
3185 3185 for c in revs:
3186 3186 ui.write(b"%d\n" % c)
3187 3187
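# Illustrative sketch (editor's addition): each stage above is a
# tree -> tree transform threaded left to right; keeping every
# intermediate tree is what makes --verify-optimized possible.
def _example_run_stages(tree, stages):
    bystage = {}
    for name, transform in stages:
        tree = transform(tree)
        bystage[name] = tree
    return bystage, tree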
3188 3188
3189 3189 @command(
3190 3190 b'debugserve',
3191 3191 [
3192 3192 (
3193 3193 b'',
3194 3194 b'sshstdio',
3195 3195 False,
3196 3196 _(b'run an SSH server bound to process handles'),
3197 3197 ),
3198 3198 (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
3199 3199 (b'', b'logiofile', b'', _(b'file to log server I/O to')),
3200 3200 ],
3201 3201 b'',
3202 3202 )
3203 3203 def debugserve(ui, repo, **opts):
3204 3204 """run a server with advanced settings
3205 3205
3206 3206 This command is similar to :hg:`serve`. It exists partially as a
3207 3207 workaround for the fact that ``hg serve --stdio`` must have specific
3208 3208 arguments for security reasons.
3209 3209 """
3210 3210 opts = pycompat.byteskwargs(opts)
3211 3211
3212 3212 if not opts[b'sshstdio']:
3213 3213 raise error.Abort(_(b'only --sshstdio is currently supported'))
3214 3214
3215 3215 logfh = None
3216 3216
3217 3217 if opts[b'logiofd'] and opts[b'logiofile']:
3218 3218 raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))
3219 3219
3220 3220 if opts[b'logiofd']:
3221 3221 # Line buffered because output is line based.
3222 3222 try:
3223 3223 logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 1)
3224 3224 except OSError as e:
3225 3225 if e.errno != errno.ESPIPE:
3226 3226 raise
3227 3227 # can't seek a pipe, so `ab` mode fails on py3
3228 3228 logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 1)
3229 3229 elif opts[b'logiofile']:
3230 3230 logfh = open(opts[b'logiofile'], b'ab', 1)
3231 3231
3232 3232 s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
3233 3233 s.serve_forever()
3234 3234
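# Illustrative sketch (editor's addition): append mode has to seek to the
# end of the file, which pipes cannot do; hence the ESPIPE fallback to
# plain write mode above. Standalone version:
import errno
import os

def _example_open_log_fd(fd):
    try:
        return os.fdopen(fd, 'ab', 1)  # line buffered, append
    except OSError as e:
        if e.errno != errno.ESPIPE:
            raise
        return os.fdopen(fd, 'wb', 1)  # pipes cannot seek, so no append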
3235 3235
3236 3236 @command(b'debugsetparents', [], _(b'REV1 [REV2]'))
3237 3237 def debugsetparents(ui, repo, rev1, rev2=None):
3238 3238 """manually set the parents of the current working directory
3239 3239
3240 3240 This is useful for writing repository conversion tools, but should
3241 3241 be used with care. For example, neither the working directory nor the
3242 3242 dirstate is updated, so file status may be incorrect after running this
3243 3243 command.
3244 3244
3245 3245 Returns 0 on success.
3246 3246 """
3247 3247
3248 3248 node1 = scmutil.revsingle(repo, rev1).node()
3249 3249 node2 = scmutil.revsingle(repo, rev2, b'null').node()
3250 3250
3251 3251 with repo.wlock():
3252 3252 repo.setparents(node1, node2)
3253 3253
3254 3254
3255 3255 @command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
3256 3256 def debugsidedata(ui, repo, file_, rev=None, **opts):
3257 3257 """dump the side data for a cl/manifest/file revision
3258 3258
3259 3259 Use --verbose to dump the sidedata content."""
3260 3260 opts = pycompat.byteskwargs(opts)
3261 3261 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
3262 3262 if rev is not None:
3263 3263 raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
3264 3264 file_, rev = None, file_
3265 3265 elif rev is None:
3266 3266 raise error.CommandError(b'debugsidedata', _(b'invalid arguments'))
3267 3267 r = cmdutil.openstorage(repo, b'debugsidedata', file_, opts)
3268 3268 r = getattr(r, '_revlog', r)
3269 3269 try:
3270 3270 sidedata = r.sidedata(r.lookup(rev))
3271 3271 except KeyError:
3272 3272 raise error.Abort(_(b'invalid revision identifier %s') % rev)
3273 3273 if sidedata:
3274 3274 sidedata = list(sidedata.items())
3275 3275 sidedata.sort()
3276 3276 ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
3277 3277 for key, value in sidedata:
3278 3278 ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
3279 3279 if ui.verbose:
3280 3280 ui.writenoi18n(b' %s\n' % stringutil.pprint(value))
3281 3281
3282 3282
3283 3283 @command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
3284 3284 def debugssl(ui, repo, source=None, **opts):
3285 3285 '''test a secure connection to a server
3286 3286
3287 3287 This builds the certificate chain for the server on Windows, installing the
3288 3288 missing intermediates and trusted root via Windows Update if necessary. It
3289 3289 does nothing on other platforms.
3290 3290
3291 3291 If SOURCE is omitted, the 'default' path will be used. If a URL is given,
3292 3292 that server is used. See :hg:`help urls` for more information.
3293 3293
3294 3294 If the update succeeds, retry the original operation. Otherwise, the cause
3295 3295 of the SSL error is likely another issue.
3296 3296 '''
3297 3297 if not pycompat.iswindows:
3298 3298 raise error.Abort(
3299 3299 _(b'certificate chain building is only possible on Windows')
3300 3300 )
3301 3301
3302 3302 if not source:
3303 3303 if not repo:
3304 3304 raise error.Abort(
3305 3305 _(
3306 3306 b"there is no Mercurial repository here, and no "
3307 3307 b"server specified"
3308 3308 )
3309 3309 )
3310 3310 source = b"default"
3311 3311
3312 3312 source, branches = hg.parseurl(ui.expandpath(source))
3313 3313 url = util.url(source)
3314 3314
3315 3315 defaultport = {b'https': 443, b'ssh': 22}
3316 3316 if url.scheme in defaultport:
3317 3317 try:
3318 3318 addr = (url.host, int(url.port or defaultport[url.scheme]))
3319 3319 except ValueError:
3320 3320 raise error.Abort(_(b"malformed port number in URL"))
3321 3321 else:
3322 3322 raise error.Abort(_(b"only https and ssh connections are supported"))
3323 3323
3324 3324 from . import win32
3325 3325
3326 3326 s = ssl.wrap_socket(
3327 3327 socket.socket(),
3328 3328 ssl_version=ssl.PROTOCOL_TLS,
3329 3329 cert_reqs=ssl.CERT_NONE,
3330 3330 ca_certs=None,
3331 3331 )
3332 3332
3333 3333 try:
3334 3334 s.connect(addr)
3335 3335 cert = s.getpeercert(True)
3336 3336
3337 3337 ui.status(_(b'checking the certificate chain for %s\n') % url.host)
3338 3338
3339 3339 complete = win32.checkcertificatechain(cert, build=False)
3340 3340
3341 3341 if not complete:
3342 3342 ui.status(_(b'certificate chain is incomplete, updating... '))
3343 3343
3344 3344 if not win32.checkcertificatechain(cert):
3345 3345 ui.status(_(b'failed.\n'))
3346 3346 else:
3347 3347 ui.status(_(b'done.\n'))
3348 3348 else:
3349 3349 ui.status(_(b'full certificate chain is available\n'))
3350 3350 finally:
3351 3351 s.close()
3352 3352
3353 3353
3354 3354 @command(
3355 3355 b'debugsub',
3356 3356 [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
3357 3357 _(b'[-r REV] [REV]'),
3358 3358 )
3359 3359 def debugsub(ui, repo, rev=None):
3360 3360 ctx = scmutil.revsingle(repo, rev, None)
3361 3361 for k, v in sorted(ctx.substate.items()):
3362 3362 ui.writenoi18n(b'path %s\n' % k)
3363 3363 ui.writenoi18n(b' source %s\n' % v[0])
3364 3364 ui.writenoi18n(b' revision %s\n' % v[1])
3365 3365
3366 3366
3367 3367 @command(
3368 3368 b'debugsuccessorssets',
3369 3369 [(b'', b'closest', False, _(b'return closest successors sets only'))],
3370 3370 _(b'[REV]'),
3371 3371 )
3372 3372 def debugsuccessorssets(ui, repo, *revs, **opts):
3373 3373 """show set of successors for revision
3374 3374
3375 3375 A successors set of changeset A is a consistent group of revisions that
3376 3376 succeed A. It contains non-obsolete changesets only, unless the
3377 3377 --closest option is set.
3378 3378
3379 3379 In most cases a changeset A has a single successors set containing a single
3380 3380 successor (changeset A replaced by A').
3381 3381
3382 3382 A changeset that is made obsolete with no successors is called "pruned".
3383 3383 Such changesets have no successors sets at all.
3384 3384
3385 3385 A changeset that has been "split" will have a successors set containing
3386 3386 more than one successor.
3387 3387
3388 3388 A changeset that has been rewritten in multiple different ways is called
3389 3389 "divergent". Such changesets have multiple successor sets (each of which
3390 3390 may also be split, i.e. have multiple successors).
3391 3391
3392 3392 Results are displayed as follows::
3393 3393
3394 3394 <rev1>
3395 3395 <successors-1A>
3396 3396 <rev2>
3397 3397 <successors-2A>
3398 3398 <successors-2B1> <successors-2B2> <successors-2B3>
3399 3399
3400 3400 Here rev2 has two possible (i.e. divergent) successors sets. The first
3401 3401 holds one element, whereas the second holds three (i.e. the changeset has
3402 3402 been split).
3403 3403 """
3404 3404 # passed to successorssets caching computation from one call to another
3405 3405 cache = {}
3406 3406 ctx2str = bytes
3407 3407 node2str = short
3408 3408 for rev in scmutil.revrange(repo, revs):
3409 3409 ctx = repo[rev]
3410 3410 ui.write(b'%s\n' % ctx2str(ctx))
3411 3411 for succsset in obsutil.successorssets(
3412 3412 repo, ctx.node(), closest=opts['closest'], cache=cache
3413 3413 ):
3414 3414 if succsset:
3415 3415 ui.write(b' ')
3416 3416 ui.write(node2str(succsset[0]))
3417 3417 for node in succsset[1:]:
3418 3418 ui.write(b' ')
3419 3419 ui.write(node2str(node))
3420 3420 ui.write(b'\n')
3421 3421
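# Illustrative sketch (editor's addition): the format documented above is
# one header line per revision followed by one indented line per
# successors set; a pruned changeset has no sets and prints only a header.
def _example_format_succsets(rev, succsets):
    lines = ['%d' % rev]
    for succset in succsets:
        lines.append(' ' + ' '.join(succset))
    return '\n'.join(lines)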
3422 3422
3423 3423 @command(
3424 3424 b'debugtemplate',
3425 3425 [
3426 3426 (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
3427 3427 (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
3428 3428 ],
3429 3429 _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
3430 3430 optionalrepo=True,
3431 3431 )
3432 3432 def debugtemplate(ui, repo, tmpl, **opts):
3433 3433 """parse and apply a template
3434 3434
3435 3435 If -r/--rev is given, the template is processed as a log template and
3436 3436 applied to the given changesets. Otherwise, it is processed as a generic
3437 3437 template.
3438 3438
3439 3439 Use --verbose to print the parsed tree.
3440 3440 """
3441 3441 revs = None
3442 3442 if opts['rev']:
3443 3443 if repo is None:
3444 3444 raise error.RepoError(
3445 3445 _(b'there is no Mercurial repository here (.hg not found)')
3446 3446 )
3447 3447 revs = scmutil.revrange(repo, opts['rev'])
3448 3448
3449 3449 props = {}
3450 3450 for d in opts['define']:
3451 3451 try:
3452 3452 k, v = (e.strip() for e in d.split(b'=', 1))
3453 3453 if not k or k == b'ui':
3454 3454 raise ValueError
3455 3455 props[k] = v
3456 3456 except ValueError:
3457 3457 raise error.Abort(_(b'malformed keyword definition: %s') % d)
3458 3458
3459 3459 if ui.verbose:
3460 3460 aliases = ui.configitems(b'templatealias')
3461 3461 tree = templater.parse(tmpl)
3462 3462 ui.note(templater.prettyformat(tree), b'\n')
3463 3463 newtree = templater.expandaliases(tree, aliases)
3464 3464 if newtree != tree:
3465 3465 ui.notenoi18n(
3466 3466 b"* expanded:\n", templater.prettyformat(newtree), b'\n'
3467 3467 )
3468 3468
3469 3469 if revs is None:
3470 3470 tres = formatter.templateresources(ui, repo)
3471 3471 t = formatter.maketemplater(ui, tmpl, resources=tres)
3472 3472 if ui.verbose:
3473 3473 kwds, funcs = t.symbolsuseddefault()
3474 3474 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3475 3475 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3476 3476 ui.write(t.renderdefault(props))
3477 3477 else:
3478 3478 displayer = logcmdutil.maketemplater(ui, repo, tmpl)
3479 3479 if ui.verbose:
3480 3480 kwds, funcs = displayer.t.symbolsuseddefault()
3481 3481 ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
3482 3482 ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
3483 3483 for r in revs:
3484 3484 displayer.show(repo[r], **pycompat.strkwargs(props))
3485 3485 displayer.close()
3486 3486
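# Illustrative sketch (editor's addition): the --verbose path above
# parses the template, expands [templatealias] definitions, and reports
# the expanded tree only when expansion actually changed something.
def _example_show_expansion(parse, expand, tmpl, aliases):
    tree = parse(tmpl)
    newtree = expand(tree, aliases)
    return newtree if newtree != tree else None  # None: nothing expanded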
3487 3487
3488 3488 @command(
3489 3489 b'debuguigetpass',
3490 3490 [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
3491 3491 _(b'[-p TEXT]'),
3492 3492 norepo=True,
3493 3493 )
3494 3494 def debuguigetpass(ui, prompt=b''):
3495 3495 """show prompt to type password"""
3496 3496 r = ui.getpass(prompt)
3497 3497 ui.writenoi18n(b'response: %s\n' % r)
3498 3498
3499 3499
3500 3500 @command(
3501 3501 b'debuguiprompt',
3502 3502 [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
3503 3503 _(b'[-p TEXT]'),
3504 3504 norepo=True,
3505 3505 )
3506 3506 def debuguiprompt(ui, prompt=b''):
3507 3507 """show plain prompt"""
3508 3508 r = ui.prompt(prompt)
3509 3509 ui.writenoi18n(b'response: %s\n' % r)
3510 3510
3511 3511
3512 3512 @command(b'debugupdatecaches', [])
3513 3513 def debugupdatecaches(ui, repo, *pats, **opts):
3514 3514 """warm all known caches in the repository"""
3515 3515 with repo.wlock(), repo.lock():
3516 3516 repo.updatecaches(full=True)
3517 3517
3518 3518
3519 3519 @command(
3520 3520 b'debugupgraderepo',
3521 3521 [
3522 3522 (
3523 3523 b'o',
3524 3524 b'optimize',
3525 3525 [],
3526 3526 _(b'extra optimization to perform'),
3527 3527 _(b'NAME'),
3528 3528 ),
3529 3529 (b'', b'run', False, _(b'performs an upgrade')),
3530 3530 (b'', b'backup', True, _(b'keep the old repository content around')),
3531 3531 (b'', b'changelog', None, _(b'select the changelog for upgrade')),
3532 3532 (b'', b'manifest', None, _(b'select the manifest for upgrade')),
3533 3533 ],
3534 3534 )
3535 3535 def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
3536 3536 """upgrade a repository to use different features
3537 3537
3538 3538 If no arguments are specified, the repository is evaluated for upgrade
3539 3539 and a list of problems and potential optimizations is printed.
3540 3540
3541 3541 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
3542 3542 can be influenced via additional arguments. More details will be provided
3543 3543 by the command output when run without ``--run``.
3544 3544
3545 3545 During the upgrade, the repository will be locked and no writes will be
3546 3546 allowed.
3547 3547
3548 3548 At the end of the upgrade, the repository may not be readable while new
3549 3549 repository data is swapped in. This window will be as long as it takes to
3550 3550 rename some directories inside the ``.hg`` directory. On most machines, this
3551 3551 should complete almost instantaneously and the chances of a consumer being
3552 3552 unable to access the repository should be low.
3553 3553
3554 3554 By default, all revlogs will be upgraded. You can restrict this using
3555 3555 flags such as `--manifest`:
3556 3556
3557 3557 * `--manifest`: only optimize the manifest
3558 3558 * `--no-manifest`: optimize all revlogs except the manifest
3559 3559 * `--changelog`: optimize the changelog only
3560 3560 * `--no-changelog --no-manifest`: optimize filelogs only
3561 3561 """
3562 3562 return upgrade.upgraderepo(
3563 3563 ui, repo, run=run, optimize=optimize, backup=backup, **opts
3564 3564 )
3565 3565
3566 3566
3567 3567 @command(
3568 3568 b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
3569 3569 )
3570 3570 def debugwalk(ui, repo, *pats, **opts):
3571 3571 """show how files match on given patterns"""
3572 3572 opts = pycompat.byteskwargs(opts)
3573 3573 m = scmutil.match(repo[None], pats, opts)
3574 3574 if ui.verbose:
3575 3575 ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
3576 3576 items = list(repo[None].walk(m))
3577 3577 if not items:
3578 3578 return
3579 3579 f = lambda fn: fn
3580 3580 if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
3581 3581 f = lambda fn: util.normpath(fn)
3582 3582 fmt = b'f %%-%ds %%-%ds %%s' % (
3583 3583 max([len(abs) for abs in items]),
3584 3584 max([len(repo.pathto(abs)) for abs in items]),
3585 3585 )
3586 3586 for abs in items:
3587 3587 line = fmt % (
3588 3588 abs,
3589 3589 f(repo.pathto(abs)),
3590 3590 m.exact(abs) and b'exact' or b'',
3591 3591 )
3592 3592 ui.write(b"%s\n" % line.rstrip())
3593 3593
3594 3594
3595 3595 @command(b'debugwhyunstable', [], _(b'REV'))
3596 3596 def debugwhyunstable(ui, repo, rev):
3597 3597 """explain instabilities of a changeset"""
3598 3598 for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
3599 3599 dnodes = b''
3600 3600 if entry.get(b'divergentnodes'):
3601 3601 dnodes = (
3602 3602 b' '.join(
3603 3603 b'%s (%s)' % (ctx.hex(), ctx.phasestr())
3604 3604 for ctx in entry[b'divergentnodes']
3605 3605 )
3606 3606 + b' '
3607 3607 )
3608 3608 ui.write(
3609 3609 b'%s: %s%s %s\n'
3610 3610 % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
3611 3611 )
3612 3612
3613 3613
3614 3614 @command(
3615 3615 b'debugwireargs',
3616 3616 [
3617 3617 (b'', b'three', b'', b'three'),
3618 3618 (b'', b'four', b'', b'four'),
3619 3619 (b'', b'five', b'', b'five'),
3620 3620 ]
3621 3621 + cmdutil.remoteopts,
3622 3622 _(b'REPO [OPTIONS]... [ONE [TWO]]'),
3623 3623 norepo=True,
3624 3624 )
3625 3625 def debugwireargs(ui, repopath, *vals, **opts):
3626 3626 opts = pycompat.byteskwargs(opts)
3627 3627 repo = hg.peer(ui, opts, repopath)
3628 3628 for opt in cmdutil.remoteopts:
3629 3629 del opts[opt[1]]
3630 3630 args = {}
3631 3631 for k, v in pycompat.iteritems(opts):
3632 3632 if v:
3633 3633 args[k] = v
3634 3634 args = pycompat.strkwargs(args)
3635 3635 # run twice to check that we don't mess up the stream for the next command
3636 3636 res1 = repo.debugwireargs(*vals, **args)
3637 3637 res2 = repo.debugwireargs(*vals, **args)
3638 3638 ui.write(b"%s\n" % res1)
3639 3639 if res1 != res2:
3640 3640 ui.warn(b"%s\n" % res2)
3641 3641
3642 3642
3643 3643 def _parsewirelangblocks(fh):
3644 3644 activeaction = None
3645 3645 blocklines = []
3646 3646 lastindent = 0
3647 3647
3648 3648 for line in fh:
3649 3649 line = line.rstrip()
3650 3650 if not line:
3651 3651 continue
3652 3652
3653 3653 if line.startswith(b'#'):
3654 3654 continue
3655 3655
3656 3656 if not line.startswith(b' '):
3657 3657 # New block. Flush previous one.
3658 3658 if activeaction:
3659 3659 yield activeaction, blocklines
3660 3660
3661 3661 activeaction = line
3662 3662 blocklines = []
3663 3663 lastindent = 0
3664 3664 continue
3665 3665
3666 3666 # Else we start with an indent.
3667 3667
3668 3668 if not activeaction:
3669 3669 raise error.Abort(_(b'indented line outside of block'))
3670 3670
3671 3671 indent = len(line) - len(line.lstrip())
3672 3672
3673 3673 # If this line is indented more than the last line, concatenate it.
3674 3674 if indent > lastindent and blocklines:
3675 3675 blocklines[-1] += line.lstrip()
3676 3676 else:
3677 3677 blocklines.append(line)
3678 3678 lastindent = indent
3679 3679
3680 3680 # Flush last block.
3681 3681 if activeaction:
3682 3682 yield activeaction, blocklines
3683 3683
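A hedged sketch of how this parser groups a script into (action, lines)
pairs; it assumes this module is importable (in-tree it is
``mercurial.debugcommands``) and the script content is illustrative::

  import io

  from mercurial import debugcommands

  script = (
      b'# comments and blank lines are skipped\n'
      b'command listkeys\n'
      b'    namespace bookmarks\n'
      b'raw\n'
      b'    hello\n'
  )

  for action, lines in debugcommands._parsewirelangblocks(io.BytesIO(script)):
      print(action, lines)

  # Roughly:
  #   b'command listkeys' [b'    namespace bookmarks']
  #   b'raw' [b'    hello']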
3684 3684
3685 3685 @command(
3686 3686 b'debugwireproto',
3687 3687 [
3688 3688 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
3689 3689 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
3690 3690 (
3691 3691 b'',
3692 3692 b'noreadstderr',
3693 3693 False,
3694 3694 _(b'do not read from stderr of the remote'),
3695 3695 ),
3696 3696 (
3697 3697 b'',
3698 3698 b'nologhandshake',
3699 3699 False,
3700 3700 _(b'do not log I/O related to the peer handshake'),
3701 3701 ),
3702 3702 ]
3703 3703 + cmdutil.remoteopts,
3704 3704 _(b'[PATH]'),
3705 3705 optionalrepo=True,
3706 3706 )
3707 3707 def debugwireproto(ui, repo, path=None, **opts):
3708 3708 """send wire protocol commands to a server
3709 3709
3710 3710 This command can be used to issue wire protocol commands to remote
3711 3711 peers and to debug the raw data being exchanged.
3712 3712
3713 3713 ``--localssh`` will start an SSH server against the current repository
3714 3714 and connect to that. By default, the connection will perform a handshake
3715 3715 and establish an appropriate peer instance.
3716 3716
3717 3717 ``--peer`` can be used to bypass the handshake protocol and construct a
3718 3718 peer instance using the specified class type. Valid values are ``raw``,
3719 3719 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3720 3720 raw data payloads and don't support higher-level command actions.
3721 3721
3722 3722 ``--noreadstderr`` can be used to disable automatic reading from stderr
3723 3723 of the peer (for SSH connections only). Disabling automatic reading of
3724 3724 stderr is useful for making output more deterministic.
3725 3725
3726 3726 Commands are issued via a mini language which is specified via stdin.
3727 3727 The language consists of individual actions to perform. An action is
3728 3728 defined by a block. A block is defined as a line with no leading
3729 3729 space followed by 0 or more lines with leading space. Blocks are
3730 3730 effectively a high-level command with additional metadata.
3731 3731
3732 3732 Lines beginning with ``#`` are ignored.
3733 3733
3734 3734 The following sections denote available actions.
3735 3735
3736 3736 raw
3737 3737 ---
3738 3738
3739 3739 Send raw data to the server.
3740 3740
3741 3741 The block payload contains the raw data to send as one atomic send
3742 3742 operation. The data may not actually be delivered in a single system
3743 3743 call: it depends on the abilities of the transport being used.
3744 3744
3745 3745 Each line in the block is de-indented and concatenated. Then, that
3746 3746 value is evaluated as a Python b'' literal. This allows the use of
3747 3747 backslash escaping, etc.
3748 3748
3749 3749 raw+
3750 3750 ----
3751 3751
3752 3752 Behaves like ``raw`` except flushes output afterwards.
3753 3753
3754 3754 command <X>
3755 3755 -----------
3756 3756
3757 3757 Send a request to run a named command, whose name follows the ``command``
3758 3758 string.
3759 3759
3760 3760 Arguments to the command are defined as lines in this block. The format of
3761 3761 each line is ``<key> <value>``. e.g.::
3762 3762
3763 3763 command listkeys
3764 3764 namespace bookmarks
3765 3765
3766 3766 If the value begins with ``eval:``, it will be interpreted as a Python
3767 3767 literal expression. Otherwise values are interpreted as Python b'' literals.
3768 3768 This allows sending complex types and encoding special byte sequences via
3769 3769 backslash escaping.
3770 3770
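For example, a hypothetical request passing an empty list for the
``known`` command's ``nodes`` argument (command and argument shown for
illustration)::

  command known
      nodes eval:[]
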
3771 3771 The following arguments have special meaning:
3772 3772
3773 3773 ``PUSHFILE``
3774 3774 When defined, the *push* mechanism of the peer will be used instead
3775 3775 of the static request-response mechanism and the content of the
3776 3776 file specified in the value of this argument will be sent as the
3777 3777 command payload.
3778 3778
3779 3779 This can be used to submit a local bundle file to the remote.
3780 3780
3781 3781 batchbegin
3782 3782 ----------
3783 3783
3784 3784 Instruct the peer to begin a batched send.
3785 3785
3786 3786 All ``command`` blocks are queued for execution until the next
3787 3787 ``batchsubmit`` block.
3788 3788
3789 3789 batchsubmit
3790 3790 -----------
3791 3791
3792 3792 Submit previously queued ``command`` blocks as a batch request.
3793 3793
3794 3794 This action MUST be paired with a ``batchbegin`` action.
3795 3795
3796 3796 httprequest <method> <path>
3797 3797 ---------------------------
3798 3798
3799 3799 (HTTP peer only)
3800 3800
3801 3801 Send an HTTP request to the peer.
3802 3802
3803 3803 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3804 3804
3805 3805 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3806 3806 headers to add to the request. e.g. ``Accept: foo``.
3807 3807
3808 3808 The following arguments are special:
3809 3809
3810 3810 ``BODYFILE``
3811 3811 The content of the file defined as the value to this argument will be
3812 3812 transferred verbatim as the HTTP request body.
3813 3813
3814 3814 ``frame <type> <flags> <payload>``
3815 3815 Send a unified protocol frame as part of the request body.
3816 3816
3817 3817 All frames will be collected and sent as the body to the HTTP
3818 3818 request.
3819 3819
3820 3820 close
3821 3821 -----
3822 3822
3823 3823 Close the connection to the server.
3824 3824
3825 3825 flush
3826 3826 -----
3827 3827
3828 3828 Flush data written to the server.
3829 3829
3830 3830 readavailable
3831 3831 -------------
3832 3832
3833 3833 Close the write end of the connection and read all available data from
3834 3834 the server.
3835 3835
3836 3836 If the connection to the server encompasses multiple pipes, we poll both
3837 3837 pipes and read available data.
3838 3838
3839 3839 readline
3840 3840 --------
3841 3841
3842 3842 Read a line of output from the server. If there are multiple output
3843 3843 pipes, reads only the main pipe.
3844 3844
3845 3845 ereadline
3846 3846 ---------
3847 3847
3848 3848 Like ``readline``, but read from the stderr pipe, if available.
3849 3849
3850 3850 read <X>
3851 3851 --------
3852 3852
3853 3853 ``read()`` ``<X>`` bytes from the server's main output pipe.
3854 3854
3855 3855 eread <X>
3856 3856 ---------
3857 3857
3858 3858 ``read()`` ``<X>`` bytes from the server's stderr pipe, if available.
3859 3859
3860 3860 Specifying Unified Frame-Based Protocol Frames
3861 3861 ----------------------------------------------
3862 3862
3863 3863 It is possible to emit *Unified Frame-Based Protocol* frames by using
3864 3864 special syntax.
3865 3865
3866 3866 A frame is composed of a type, flags, and payload. These can be parsed
3867 3867 from a string of the form:
3868 3868
3869 3869 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3870 3870
3871 3871 ``request-id`` and ``stream-id`` are integers defining the request and
3872 3872 stream identifiers.
3873 3873
3874 3874 ``type`` can be an integer value for the frame type or the string name
3875 3875 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3876 3876 ``command-name``.
3877 3877
3878 3878 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3879 3879 components. Each component (and there can be just one) can be an integer
3880 3880 or a flag name for stream flags or frame flags, respectively. Values are
3881 3881 resolved to integers and then bitwise OR'd together.
3882 3882
3883 3883 ``payload`` represents the raw frame payload. If it begins with
3884 3884 ``cbor:``, the following string is evaluated as Python code and the
3885 3885 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3886 3886 as a Python byte string literal.
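
For example, a frame carrying a CBOR-encoded command request might be
written as (the command name is illustrative)::

  1 1 stream-begin command-request new cbor:{b'name': b'heads'}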
3887 3887 """
3888 3888 opts = pycompat.byteskwargs(opts)
3889 3889
3890 3890 if opts[b'localssh'] and not repo:
3891 3891 raise error.Abort(_(b'--localssh requires a repository'))
3892 3892
3893 3893 if opts[b'peer'] and opts[b'peer'] not in (
3894 3894 b'raw',
3895 3895 b'http2',
3896 3896 b'ssh1',
3897 3897 b'ssh2',
3898 3898 ):
3899 3899 raise error.Abort(
3900 3900 _(b'invalid value for --peer'),
3901 3901 hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
3902 3902 )
3903 3903
3904 3904 if path and opts[b'localssh']:
3905 3905 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
3906 3906
3907 3907 if ui.interactive():
3908 3908 ui.write(_(b'(waiting for commands on stdin)\n'))
3909 3909
3910 3910 blocks = list(_parsewirelangblocks(ui.fin))
3911 3911
3912 3912 proc = None
3913 3913 stdin = None
3914 3914 stdout = None
3915 3915 stderr = None
3916 3916 opener = None
3917 3917
3918 3918 if opts[b'localssh']:
3919 3919 # We start the SSH server in its own process so there is process
3920 3920 # separation. This prevents a whole class of potential bugs around
3921 3921 # shared state from interfering with server operation.
3922 3922 args = procutil.hgcmd() + [
3923 3923 b'-R',
3924 3924 repo.root,
3925 3925 b'debugserve',
3926 3926 b'--sshstdio',
3927 3927 ]
3928 3928 proc = subprocess.Popen(
3929 3929 pycompat.rapply(procutil.tonativestr, args),
3930 3930 stdin=subprocess.PIPE,
3931 3931 stdout=subprocess.PIPE,
3932 3932 stderr=subprocess.PIPE,
3933 3933 bufsize=0,
3934 3934 )
3935 3935
3936 3936 stdin = proc.stdin
3937 3937 stdout = proc.stdout
3938 3938 stderr = proc.stderr
3939 3939
3940 3940 # We turn the pipes into observers so we can log I/O.
3941 3941 if ui.verbose or opts[b'peer'] == b'raw':
3942 3942 stdin = util.makeloggingfileobject(
3943 3943 ui, proc.stdin, b'i', logdata=True
3944 3944 )
3945 3945 stdout = util.makeloggingfileobject(
3946 3946 ui, proc.stdout, b'o', logdata=True
3947 3947 )
3948 3948 stderr = util.makeloggingfileobject(
3949 3949 ui, proc.stderr, b'e', logdata=True
3950 3950 )
3951 3951
3952 3952 # --localssh also implies the peer connection settings.
3953 3953
3954 3954 url = b'ssh://localserver'
3955 3955 autoreadstderr = not opts[b'noreadstderr']
3956 3956
3957 3957 if opts[b'peer'] == b'ssh1':
3958 3958 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
3959 3959 peer = sshpeer.sshv1peer(
3960 3960 ui,
3961 3961 url,
3962 3962 proc,
3963 3963 stdin,
3964 3964 stdout,
3965 3965 stderr,
3966 3966 None,
3967 3967 autoreadstderr=autoreadstderr,
3968 3968 )
3969 3969 elif opts[b'peer'] == b'ssh2':
3970 3970 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
3971 3971 peer = sshpeer.sshv2peer(
3972 3972 ui,
3973 3973 url,
3974 3974 proc,
3975 3975 stdin,
3976 3976 stdout,
3977 3977 stderr,
3978 3978 None,
3979 3979 autoreadstderr=autoreadstderr,
3980 3980 )
3981 3981 elif opts[b'peer'] == b'raw':
3982 3982 ui.write(_(b'using raw connection to peer\n'))
3983 3983 peer = None
3984 3984 else:
3985 3985 ui.write(_(b'creating ssh peer from handshake results\n'))
3986 3986 peer = sshpeer.makepeer(
3987 3987 ui,
3988 3988 url,
3989 3989 proc,
3990 3990 stdin,
3991 3991 stdout,
3992 3992 stderr,
3993 3993 autoreadstderr=autoreadstderr,
3994 3994 )
3995 3995
3996 3996 elif path:
3997 3997 # We bypass hg.peer() so we can proxy the sockets.
3998 3998 # TODO consider not doing this because we skip
3999 3999 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
4000 4000 u = util.url(path)
4001 4001 if u.scheme != b'http':
4002 4002 raise error.Abort(_(b'only http:// paths are currently supported'))
4003 4003
4004 4004 url, authinfo = u.authinfo()
4005 4005 openerargs = {
4006 4006 'useragent': b'Mercurial debugwireproto',
4007 4007 }
4008 4008
4009 4009 # Turn pipes/sockets into observers so we can log I/O.
4010 4010 if ui.verbose:
4011 4011 openerargs.update(
4012 4012 {
4013 4013 'loggingfh': ui,
4014 4014 'loggingname': b's',
4015 4015 'loggingopts': {'logdata': True, 'logdataapis': False,},
4016 4016 }
4017 4017 )
4018 4018
4019 4019 if ui.debugflag:
4020 4020 openerargs['loggingopts']['logdataapis'] = True
4021 4021
4022 4022 # Don't send default headers when in raw mode. This allows us to
4023 4023 # bypass most of the behavior of our URL handling code so we can
4024 4024 # have near complete control over what's sent on the wire.
4025 4025 if opts[b'peer'] == b'raw':
4026 4026 openerargs['sendaccept'] = False
4027 4027
4028 4028 opener = urlmod.opener(ui, authinfo, **openerargs)
4029 4029
4030 4030 if opts[b'peer'] == b'http2':
4031 4031 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4032 4032 # We go through makepeer() because we need an API descriptor for
4033 4033 # the peer instance to be useful.
4034 4034 with ui.configoverride(
4035 4035 {(b'experimental', b'httppeer.advertise-v2'): True}
4036 4036 ):
4037 4037 if opts[b'nologhandshake']:
4038 4038 ui.pushbuffer()
4039 4039
4040 4040 peer = httppeer.makepeer(ui, path, opener=opener)
4041 4041
4042 4042 if opts[b'nologhandshake']:
4043 4043 ui.popbuffer()
4044 4044
4045 4045 if not isinstance(peer, httppeer.httpv2peer):
4046 4046 raise error.Abort(
4047 4047 _(
4048 4048 b'could not instantiate HTTP peer for '
4049 4049 b'wire protocol version 2'
4050 4050 ),
4051 4051 hint=_(
4052 4052 b'the server may not have the feature '
4053 4053 b'enabled or is not allowing this '
4054 4054 b'client version'
4055 4055 ),
4056 4056 )
4057 4057
4058 4058 elif opts[b'peer'] == b'raw':
4059 4059 ui.write(_(b'using raw connection to peer\n'))
4060 4060 peer = None
4061 4061 elif opts[b'peer']:
4062 4062 raise error.Abort(
4063 4063 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4064 4064 )
4065 4065 else:
4066 4066 peer = httppeer.makepeer(ui, path, opener=opener)
4067 4067
4068 4068 # We /could/ populate stdin/stdout with sock.makefile()...
4069 4069 else:
4070 4070 raise error.Abort(_(b'unsupported connection configuration'))
4071 4071
4072 4072 batchedcommands = None
4073 4073
4074 4074 # Now perform actions based on the parsed wire language instructions.
4075 4075 for action, lines in blocks:
4076 4076 if action in (b'raw', b'raw+'):
4077 4077 if not stdin:
4078 4078 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4079 4079
4080 4080 # Concatenate the data together.
4081 4081 data = b''.join(l.lstrip() for l in lines)
4082 4082 data = stringutil.unescapestr(data)
4083 4083 stdin.write(data)
4084 4084
4085 4085 if action == b'raw+':
4086 4086 stdin.flush()
4087 4087 elif action == b'flush':
4088 4088 if not stdin:
4089 4089 raise error.Abort(_(b'cannot call flush on this peer'))
4090 4090 stdin.flush()
4091 4091 elif action.startswith(b'command'):
4092 4092 if not peer:
4093 4093 raise error.Abort(
4094 4094 _(
4095 4095 b'cannot send commands unless peer instance '
4096 4096 b'is available'
4097 4097 )
4098 4098 )
4099 4099
4100 4100 command = action.split(b' ', 1)[1]
4101 4101
4102 4102 args = {}
4103 4103 for line in lines:
4104 4104 # We need to allow empty values.
4105 4105 fields = line.lstrip().split(b' ', 1)
4106 4106 if len(fields) == 1:
4107 4107 key = fields[0]
4108 4108 value = b''
4109 4109 else:
4110 4110 key, value = fields
4111 4111
4112 4112 if value.startswith(b'eval:'):
4113 4113 value = stringutil.evalpythonliteral(value[5:])
4114 4114 else:
4115 4115 value = stringutil.unescapestr(value)
4116 4116
4117 4117 args[key] = value
4118 4118
4119 4119 if batchedcommands is not None:
4120 4120 batchedcommands.append((command, args))
4121 4121 continue
4122 4122
4123 4123 ui.status(_(b'sending %s command\n') % command)
4124 4124
4125 4125 if b'PUSHFILE' in args:
4126 4126 with open(args[b'PUSHFILE'], 'rb') as fh:
4127 4127 del args[b'PUSHFILE']
4128 4128 res, output = peer._callpush(
4129 4129 command, fh, **pycompat.strkwargs(args)
4130 4130 )
4131 4131 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4132 4132 ui.status(
4133 4133 _(b'remote output: %s\n') % stringutil.escapestr(output)
4134 4134 )
4135 4135 else:
4136 4136 with peer.commandexecutor() as e:
4137 4137 res = e.callcommand(command, args).result()
4138 4138
4139 4139 if isinstance(res, wireprotov2peer.commandresponse):
4140 4140 val = res.objects()
4141 4141 ui.status(
4142 4142 _(b'response: %s\n')
4143 4143 % stringutil.pprint(val, bprefix=True, indent=2)
4144 4144 )
4145 4145 else:
4146 4146 ui.status(
4147 4147 _(b'response: %s\n')
4148 4148 % stringutil.pprint(res, bprefix=True, indent=2)
4149 4149 )
4150 4150
4151 4151 elif action == b'batchbegin':
4152 4152 if batchedcommands is not None:
4153 4153 raise error.Abort(_(b'nested batchbegin not allowed'))
4154 4154
4155 4155 batchedcommands = []
4156 4156 elif action == b'batchsubmit':
4157 4157 # There is a batching API we could go through. But it would be
4158 4158 # difficult to normalize requests into function calls. It is easier
4159 4159 # to bypass this layer and normalize to commands + args.
4160 4160 ui.status(
4161 4161 _(b'sending batch with %d sub-commands\n')
4162 4162 % len(batchedcommands)
4163 4163 )
4164 4164 assert peer is not None
4165 4165 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4166 4166 ui.status(
4167 4167 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4168 4168 )
4169 4169
4170 4170 batchedcommands = None
4171 4171
4172 4172 elif action.startswith(b'httprequest '):
4173 4173 if not opener:
4174 4174 raise error.Abort(
4175 4175 _(b'cannot use httprequest without an HTTP peer')
4176 4176 )
4177 4177
4178 4178 request = action.split(b' ', 2)
4179 4179 if len(request) != 3:
4180 4180 raise error.Abort(
4181 4181 _(
4182 4182 b'invalid httprequest: expected format is '
4183 4183 b'"httprequest <method> <path>'
4184 4184 )
4185 4185 )
4186 4186
4187 4187 method, httppath = request[1:]
4188 4188 headers = {}
4189 4189 body = None
4190 4190 frames = []
4191 4191 for line in lines:
4192 4192 line = line.lstrip()
4193 4193 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4194 4194 if m:
4195 4195 # Headers need to use native strings.
4196 4196 key = pycompat.strurl(m.group(1))
4197 4197 value = pycompat.strurl(m.group(2))
4198 4198 headers[key] = value
4199 4199 continue
4200 4200
4201 4201 if line.startswith(b'BODYFILE '):
4201 4202 with open(line.split(b' ', 1)[1], 'rb') as fh:
4203 4203 body = fh.read()
4204 4204 elif line.startswith(b'frame '):
4205 4205 frame = wireprotoframing.makeframefromhumanstring(
4206 4206 line[len(b'frame ') :]
4207 4207 )
4208 4208
4209 4209 frames.append(frame)
4210 4210 else:
4211 4211 raise error.Abort(
4212 4212 _(b'unknown argument to httprequest: %s') % line
4213 4213 )
4214 4214
4215 4215 url = path + httppath
4216 4216
4217 4217 if frames:
4218 4218 body = b''.join(bytes(f) for f in frames)
4219 4219
4220 4220 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4221 4221
4222 4222 # urllib.Request insists on using has_data() as a proxy for
4223 4223 # determining the request method. Override that to use our
4224 4224 # explicitly requested method.
4225 4225 req.get_method = lambda: pycompat.sysstr(method)
4226 4226
4227 4227 try:
4228 4228 res = opener.open(req)
4229 4229 body = res.read()
4230 4230 except util.urlerr.urlerror as e:
4231 4231 # read() method must be called, but only exists in Python 2
4232 4232 getattr(e, 'read', lambda: None)()
4233 4233 continue
4234 4234
4235 4235 ct = res.headers.get('Content-Type')
4236 4236 if ct == 'application/mercurial-cbor':
4237 4237 ui.write(
4238 4238 _(b'cbor> %s\n')
4239 4239 % stringutil.pprint(
4240 4240 cborutil.decodeall(body), bprefix=True, indent=2
4241 4241 )
4242 4242 )
4243 4243
4244 4244 elif action == b'close':
4245 4245 assert peer is not None
4246 4246 peer.close()
4247 4247 elif action == b'readavailable':
4248 4248 if not stdout or not stderr:
4249 4249 raise error.Abort(
4250 4250 _(b'readavailable not available on this peer')
4251 4251 )
4252 4252
4253 4253 stdin.close()
4254 4254 stdout.read()
4255 4255 stderr.read()
4256 4256
4257 4257 elif action == b'readline':
4258 4258 if not stdout:
4259 4259 raise error.Abort(_(b'readline not available on this peer'))
4260 4260 stdout.readline()
4261 4261 elif action == b'ereadline':
4262 4262 if not stderr:
4263 4263 raise error.Abort(_(b'ereadline not available on this peer'))
4264 4264 stderr.readline()
4265 4265 elif action.startswith(b'read '):
4266 4266 count = int(action.split(b' ', 1)[1])
4267 4267 if not stdout:
4268 4268 raise error.Abort(_(b'read not available on this peer'))
4269 4269 stdout.read(count)
4270 4270 elif action.startswith(b'eread '):
4271 4271 count = int(action.split(b' ', 1)[1])
4272 4272 if not stderr:
4273 4273 raise error.Abort(_(b'eread not available on this peer'))
4274 4274 stderr.read(count)
4275 4275 else:
4276 4276 raise error.Abort(_(b'unknown action: %s') % action)
4277 4277
4278 4278 if batchedcommands is not None:
4279 4279 raise error.Abort(_(b'unclosed "batchbegin" request'))
4280 4280
4281 4281 if peer:
4282 4282 peer.close()
4283 4283
4284 4284 if proc:
4285 4285 proc.kill()
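
As an aside, the value decoding performed by the ``command`` action above
boils down to two helpers; a minimal sketch, assuming a Mercurial checkout
on ``PYTHONPATH``::

  from mercurial.utils import stringutil

  # Plain values go through backslash unescaping...
  assert stringutil.unescapestr(b'foo\\nbar') == b'foo\nbar'
  # ...while ``eval:`` values are parsed as Python literal expressions.
  assert stringutil.evalpythonliteral(b"[b'a', b'b']") == [b'a', b'b']
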
@@ -1,637 +1,638 b''
1 1 # fileset.py - file set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import re
12 12
13 13 from .i18n import _
14 14 from .pycompat import getattr
15 15 from . import (
16 16 error,
17 17 filesetlang,
18 18 match as matchmod,
19 19 merge,
20 20 pycompat,
21 21 registrar,
22 22 scmutil,
23 23 util,
24 24 )
25 25 from .utils import stringutil
26 26
27 27 # common weight constants
28 28 _WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME
29 29 _WEIGHT_READ_CONTENTS = filesetlang.WEIGHT_READ_CONTENTS
30 30 _WEIGHT_STATUS = filesetlang.WEIGHT_STATUS
31 31 _WEIGHT_STATUS_THOROUGH = filesetlang.WEIGHT_STATUS_THOROUGH
32 32
33 33 # helpers for processing parsed tree
34 34 getsymbol = filesetlang.getsymbol
35 35 getstring = filesetlang.getstring
36 36 _getkindpat = filesetlang.getkindpat
37 37 getpattern = filesetlang.getpattern
38 38 getargs = filesetlang.getargs
39 39
40 40
41 41 def getmatch(mctx, x):
42 42 if not x:
43 43 raise error.ParseError(_(b"missing argument"))
44 44 return methods[x[0]](mctx, *x[1:])
45 45
46 46
47 47 def getmatchwithstatus(mctx, x, hint):
48 48 keys = set(getstring(hint, b'status hint must be a string').split())
49 49 return getmatch(mctx.withstatus(keys), x)
50 50
51 51
52 52 def stringmatch(mctx, x):
53 53 return mctx.matcher([x])
54 54
55 55
56 56 def kindpatmatch(mctx, x, y):
57 57 return stringmatch(
58 58 mctx,
59 59 _getkindpat(
60 60 x, y, matchmod.allpatternkinds, _(b"pattern must be a string")
61 61 ),
62 62 )
63 63
64 64
65 65 def patternsmatch(mctx, *xs):
66 66 allkinds = matchmod.allpatternkinds
67 67 patterns = [
68 68 getpattern(x, allkinds, _(b"pattern must be a string")) for x in xs
69 69 ]
70 70 return mctx.matcher(patterns)
71 71
72 72
73 73 def andmatch(mctx, x, y):
74 74 xm = getmatch(mctx, x)
75 75 ym = getmatch(mctx.narrowed(xm), y)
76 76 return matchmod.intersectmatchers(xm, ym)
77 77
78 78
79 79 def ormatch(mctx, *xs):
80 80 ms = [getmatch(mctx, x) for x in xs]
81 81 return matchmod.unionmatcher(ms)
82 82
83 83
84 84 def notmatch(mctx, x):
85 85 m = getmatch(mctx, x)
86 86 return mctx.predicate(lambda f: not m(f), predrepr=(b'<not %r>', m))
87 87
88 88
89 89 def minusmatch(mctx, x, y):
90 90 xm = getmatch(mctx, x)
91 91 ym = getmatch(mctx.narrowed(xm), y)
92 92 return matchmod.differencematcher(xm, ym)
93 93
94 94
95 95 def listmatch(mctx, *xs):
96 96 raise error.ParseError(
97 97 _(b"can't use a list in this context"),
98 98 hint=_(b'see \'hg help "filesets.x or y"\''),
99 99 )
100 100
101 101
102 102 def func(mctx, a, b):
103 103 funcname = getsymbol(a)
104 104 if funcname in symbols:
105 105 return symbols[funcname](mctx, b)
106 106
107 107 keep = lambda fn: getattr(fn, '__doc__', None) is not None
108 108
109 109 syms = [s for (s, fn) in symbols.items() if keep(fn)]
110 110 raise error.UnknownIdentifier(funcname, syms)
111 111
112 112
113 113 # symbols are callable like:
114 114 # fun(mctx, x)
115 115 # with:
116 116 # mctx - current matchctx instance
117 117 # x - argument in tree form
118 118 symbols = filesetlang.symbols
119 119
120 120 predicate = registrar.filesetpredicate(symbols)
121 121
122 122
123 123 @predicate(b'modified()', callstatus=True, weight=_WEIGHT_STATUS)
124 124 def modified(mctx, x):
125 125 """File that is modified according to :hg:`status`.
126 126 """
127 127 # i18n: "modified" is a keyword
128 128 getargs(x, 0, 0, _(b"modified takes no arguments"))
129 129 s = set(mctx.status().modified)
130 130 return mctx.predicate(s.__contains__, predrepr=b'modified')
131 131
132 132
133 133 @predicate(b'added()', callstatus=True, weight=_WEIGHT_STATUS)
134 134 def added(mctx, x):
135 135 """File that is added according to :hg:`status`.
136 136 """
137 137 # i18n: "added" is a keyword
138 138 getargs(x, 0, 0, _(b"added takes no arguments"))
139 139 s = set(mctx.status().added)
140 140 return mctx.predicate(s.__contains__, predrepr=b'added')
141 141
142 142
143 143 @predicate(b'removed()', callstatus=True, weight=_WEIGHT_STATUS)
144 144 def removed(mctx, x):
145 145 """File that is removed according to :hg:`status`.
146 146 """
147 147 # i18n: "removed" is a keyword
148 148 getargs(x, 0, 0, _(b"removed takes no arguments"))
149 149 s = set(mctx.status().removed)
150 150 return mctx.predicate(s.__contains__, predrepr=b'removed')
151 151
152 152
153 153 @predicate(b'deleted()', callstatus=True, weight=_WEIGHT_STATUS)
154 154 def deleted(mctx, x):
155 155 """Alias for ``missing()``.
156 156 """
157 157 # i18n: "deleted" is a keyword
158 158 getargs(x, 0, 0, _(b"deleted takes no arguments"))
159 159 s = set(mctx.status().deleted)
160 160 return mctx.predicate(s.__contains__, predrepr=b'deleted')
161 161
162 162
163 163 @predicate(b'missing()', callstatus=True, weight=_WEIGHT_STATUS)
164 164 def missing(mctx, x):
165 165 """File that is missing according to :hg:`status`.
166 166 """
167 167 # i18n: "missing" is a keyword
168 168 getargs(x, 0, 0, _(b"missing takes no arguments"))
169 169 s = set(mctx.status().deleted)
170 170 return mctx.predicate(s.__contains__, predrepr=b'deleted')
171 171
172 172
173 173 @predicate(b'unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
174 174 def unknown(mctx, x):
175 175 """File that is unknown according to :hg:`status`."""
176 176 # i18n: "unknown" is a keyword
177 177 getargs(x, 0, 0, _(b"unknown takes no arguments"))
178 178 s = set(mctx.status().unknown)
179 179 return mctx.predicate(s.__contains__, predrepr=b'unknown')
180 180
181 181
182 182 @predicate(b'ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH)
183 183 def ignored(mctx, x):
184 184 """File that is ignored according to :hg:`status`."""
185 185 # i18n: "ignored" is a keyword
186 186 getargs(x, 0, 0, _(b"ignored takes no arguments"))
187 187 s = set(mctx.status().ignored)
188 188 return mctx.predicate(s.__contains__, predrepr=b'ignored')
189 189
190 190
191 191 @predicate(b'clean()', callstatus=True, weight=_WEIGHT_STATUS)
192 192 def clean(mctx, x):
193 193 """File that is clean according to :hg:`status`.
194 194 """
195 195 # i18n: "clean" is a keyword
196 196 getargs(x, 0, 0, _(b"clean takes no arguments"))
197 197 s = set(mctx.status().clean)
198 198 return mctx.predicate(s.__contains__, predrepr=b'clean')
199 199
200 200
201 201 @predicate(b'tracked()')
202 202 def tracked(mctx, x):
203 203 """File that is under Mercurial control."""
204 204 # i18n: "tracked" is a keyword
205 205 getargs(x, 0, 0, _(b"tracked takes no arguments"))
206 206 return mctx.predicate(mctx.ctx.__contains__, predrepr=b'tracked')
207 207
208 208
209 209 @predicate(b'binary()', weight=_WEIGHT_READ_CONTENTS)
210 210 def binary(mctx, x):
211 211 """File that appears to be binary (contains NUL bytes).
212 212 """
213 213 # i18n: "binary" is a keyword
214 214 getargs(x, 0, 0, _(b"binary takes no arguments"))
215 215 return mctx.fpredicate(
216 216 lambda fctx: fctx.isbinary(), predrepr=b'binary', cache=True
217 217 )
218 218
219 219
220 220 @predicate(b'exec()')
221 221 def exec_(mctx, x):
222 222 """File that is marked as executable.
223 223 """
224 224 # i18n: "exec" is a keyword
225 225 getargs(x, 0, 0, _(b"exec takes no arguments"))
226 226 ctx = mctx.ctx
227 227 return mctx.predicate(lambda f: ctx.flags(f) == b'x', predrepr=b'exec')
228 228
229 229
230 230 @predicate(b'symlink()')
231 231 def symlink(mctx, x):
232 232 """File that is marked as a symlink.
233 233 """
234 234 # i18n: "symlink" is a keyword
235 235 getargs(x, 0, 0, _(b"symlink takes no arguments"))
236 236 ctx = mctx.ctx
237 237 return mctx.predicate(lambda f: ctx.flags(f) == b'l', predrepr=b'symlink')
238 238
239 239
240 240 @predicate(b'resolved()', weight=_WEIGHT_STATUS)
241 241 def resolved(mctx, x):
242 242 """File that is marked resolved according to :hg:`resolve -l`.
243 243 """
244 244 # i18n: "resolved" is a keyword
245 245 getargs(x, 0, 0, _(b"resolved takes no arguments"))
246 246 if mctx.ctx.rev() is not None:
247 247 return mctx.never()
248 248 ms = merge.mergestate.read(mctx.ctx.repo())
249 249 return mctx.predicate(
250 250 lambda f: f in ms and ms[f] == b'r', predrepr=b'resolved'
251 251 )
252 252
253 253
254 254 @predicate(b'unresolved()', weight=_WEIGHT_STATUS)
255 255 def unresolved(mctx, x):
256 256 """File that is marked unresolved according to :hg:`resolve -l`.
257 257 """
258 258 # i18n: "unresolved" is a keyword
259 259 getargs(x, 0, 0, _(b"unresolved takes no arguments"))
260 260 if mctx.ctx.rev() is not None:
261 261 return mctx.never()
262 262 ms = merge.mergestate.read(mctx.ctx.repo())
263 263 return mctx.predicate(
264 264 lambda f: f in ms and ms[f] == b'u', predrepr=b'unresolved'
265 265 )
266 266
267 267
268 268 @predicate(b'hgignore()', weight=_WEIGHT_STATUS)
269 269 def hgignore(mctx, x):
270 270 """File that matches the active .hgignore pattern.
271 271 """
272 272 # i18n: "hgignore" is a keyword
273 273 getargs(x, 0, 0, _(b"hgignore takes no arguments"))
274 274 return mctx.ctx.repo().dirstate._ignore
275 275
276 276
277 277 @predicate(b'portable()', weight=_WEIGHT_CHECK_FILENAME)
278 278 def portable(mctx, x):
279 279 """File that has a portable name. (This doesn't include filenames with case
280 280 collisions.)
281 281 """
282 282 # i18n: "portable" is a keyword
283 283 getargs(x, 0, 0, _(b"portable takes no arguments"))
284 284 return mctx.predicate(
285 285 lambda f: util.checkwinfilename(f) is None, predrepr=b'portable'
286 286 )
287 287
288 288
289 289 @predicate(b'grep(regex)', weight=_WEIGHT_READ_CONTENTS)
290 290 def grep(mctx, x):
291 291 """File contains the given regular expression.
292 292 """
293 293 try:
294 294 # i18n: "grep" is a keyword
295 295 r = re.compile(getstring(x, _(b"grep requires a pattern")))
296 296 except re.error as e:
297 297 raise error.ParseError(
298 298 _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
299 299 )
300 300 return mctx.fpredicate(
301 301 lambda fctx: r.search(fctx.data()),
302 302 predrepr=(b'grep(%r)', r.pattern),
303 303 cache=True,
304 304 )
305 305
306 306
307 307 def _sizetomax(s):
308 308 try:
309 309 s = s.strip().lower()
310 310 for k, v in util._sizeunits:
311 311 if s.endswith(k):
312 312 # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1
313 313 n = s[: -len(k)]
314 314 inc = 1.0
315 315 if b"." in n:
316 316 inc /= 10 ** len(n.split(b".")[1])
317 317 return int((float(n) + inc) * v) - 1
318 318 # no extension, this is a precise value
319 319 return int(s)
320 320 except ValueError:
321 321 raise error.ParseError(_(b"couldn't parse size: %s") % s)
322 322
323 323
324 324 def sizematcher(expr):
325 325 """Return a function(size) -> bool from the ``size()`` expression"""
326 326 expr = expr.strip()
327 327 if b'-' in expr: # do we have a range?
328 328 a, b = expr.split(b'-', 1)
329 329 a = util.sizetoint(a)
330 330 b = util.sizetoint(b)
331 331 return lambda x: x >= a and x <= b
332 332 elif expr.startswith(b"<="):
333 333 a = util.sizetoint(expr[2:])
334 334 return lambda x: x <= a
335 335 elif expr.startswith(b"<"):
336 336 a = util.sizetoint(expr[1:])
337 337 return lambda x: x < a
338 338 elif expr.startswith(b">="):
339 339 a = util.sizetoint(expr[2:])
340 340 return lambda x: x >= a
341 341 elif expr.startswith(b">"):
342 342 a = util.sizetoint(expr[1:])
343 343 return lambda x: x > a
344 344 else:
345 345 a = util.sizetoint(expr)
346 346 b = _sizetomax(expr)
347 347 return lambda x: x >= a and x <= b
348 348
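A hedged sanity check of the grammar implemented above, assuming a
Mercurial checkout on ``PYTHONPATH`` (thresholds follow
``util.sizetoint``)::

  from mercurial import fileset

  m = fileset.sizematcher(b'1k')   # bare value: 1024 through max(1k)
  assert m(1024) and m(2047) and not m(2048)
  assert fileset.sizematcher(b'<20k')(20479)
  assert fileset.sizematcher(b'>= .5MB')(524288)
  assert fileset.sizematcher(b'4k - 1MB')(4096)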
349 349
350 350 @predicate(b'size(expression)', weight=_WEIGHT_STATUS)
351 351 def size(mctx, x):
352 352 """File size matches the given expression. Examples:
353 353
354 354 - size('1k') - files from 1024 to 2047 bytes
355 355 - size('< 20k') - files less than 20480 bytes
356 356 - size('>= .5MB') - files at least 524288 bytes
357 357 - size('4k - 1MB') - files from 4096 bytes to 1048576 bytes
358 358 """
359 359 # i18n: "size" is a keyword
360 360 expr = getstring(x, _(b"size requires an expression"))
361 361 m = sizematcher(expr)
362 362 return mctx.fpredicate(
363 363 lambda fctx: m(fctx.size()), predrepr=(b'size(%r)', expr), cache=True
364 364 )
365 365
366 366
367 367 @predicate(b'encoding(name)', weight=_WEIGHT_READ_CONTENTS)
368 368 def encoding(mctx, x):
369 369 """File can be successfully decoded with the given character
370 370 encoding. May not be useful for encodings other than ASCII and
371 371 UTF-8.
372 372 """
373 373
374 374 # i18n: "encoding" is a keyword
375 375 enc = getstring(x, _(b"encoding requires an encoding name"))
376 376
377 377 def encp(fctx):
378 378 d = fctx.data()
379 379 try:
380 380 d.decode(pycompat.sysstr(enc))
381 381 return True
382 382 except LookupError:
383 383 raise error.Abort(_(b"unknown encoding '%s'") % enc)
384 384 except UnicodeDecodeError:
385 385 return False
386 386
387 387 return mctx.fpredicate(encp, predrepr=(b'encoding(%r)', enc), cache=True)
388 388
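The decode test at the heart of ``encp()`` above, in isolation::

  data = b'caf\xc3\xa9'               # UTF-8 encoding of "café"
  assert data.decode('utf-8') == u'caf\xe9'
  try:
      b'caf\xe9'.decode('utf-8')      # Latin-1 bytes are not valid UTF-8
  except UnicodeDecodeError:
      pass                            # reported as a non-match, not an error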
389 389
390 390 @predicate(b'eol(style)', weight=_WEIGHT_READ_CONTENTS)
391 391 def eol(mctx, x):
392 392 """File contains newlines of the given style (dos, unix, mac). Binary
393 393 files are excluded, files with mixed line endings match multiple
394 394 styles.
395 395 """
396 396
397 397 # i18n: "eol" is a keyword
398 398 enc = getstring(x, _(b"eol requires a style name"))
399 399
400 400 def eolp(fctx):
401 401 if fctx.isbinary():
402 402 return False
403 403 d = fctx.data()
404 404 if (enc == b'dos' or enc == b'win') and b'\r\n' in d:
405 405 return True
406 406 elif enc == b'unix' and re.search(b'(?<!\r)\n', d):
407 407 return True
408 408 elif enc == b'mac' and re.search(b'\r(?!\n)', d):
409 409 return True
410 410 return False
411 411
412 412 return mctx.fpredicate(eolp, predrepr=(b'eol(%r)', enc), cache=True)
413 413
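A quick standalone check of the line-ending regexes used above::

  import re

  assert re.search(b'(?<!\r)\n', b'a\nb')        # bare LF: unix
  assert not re.search(b'(?<!\r)\n', b'a\r\nb')  # CRLF only: not unix
  assert re.search(b'\r(?!\n)', b'a\rb')         # bare CR: mac
  assert not re.search(b'\r(?!\n)', b'a\r\nb')   # CRLF only: not mac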
414 414
415 415 @predicate(b'copied()')
416 416 def copied(mctx, x):
417 417 """File that is recorded as being copied.
418 418 """
419 419 # i18n: "copied" is a keyword
420 420 getargs(x, 0, 0, _(b"copied takes no arguments"))
421 421
422 422 def copiedp(fctx):
423 423 p = fctx.parents()
424 424 return p and p[0].path() != fctx.path()
425 425
426 426 return mctx.fpredicate(copiedp, predrepr=b'copied', cache=True)
427 427
428 428
429 429 @predicate(b'revs(revs, pattern)', weight=_WEIGHT_STATUS)
430 430 def revs(mctx, x):
431 431 """Evaluate set in the specified revisions. If the revset match multiple
432 432 revs, this will return file matching pattern in any of the revision.
433 433 """
434 434 # i18n: "revs" is a keyword
435 435 r, x = getargs(x, 2, 2, _(b"revs takes two arguments"))
436 436 # i18n: "revs" is a keyword
437 437 revspec = getstring(r, _(b"first argument to revs must be a revision"))
438 438 repo = mctx.ctx.repo()
439 439 revs = scmutil.revrange(repo, [revspec])
440 440
441 441 matchers = []
442 442 for r in revs:
443 443 ctx = repo[r]
444 444 mc = mctx.switch(ctx.p1(), ctx)
445 445 matchers.append(getmatch(mc, x))
446 446 if not matchers:
447 447 return mctx.never()
448 448 if len(matchers) == 1:
449 449 return matchers[0]
450 450 return matchmod.unionmatcher(matchers)
451 451
452 452
453 453 @predicate(b'status(base, rev, pattern)', weight=_WEIGHT_STATUS)
454 454 def status(mctx, x):
455 455 """Evaluate predicate using status change between ``base`` and
456 456 ``rev``. Examples:
457 457
458 458 - ``status(3, 7, added())`` - matches files added from "3" to "7"
459 459 """
460 460 repo = mctx.ctx.repo()
461 461 # i18n: "status" is a keyword
462 462 b, r, x = getargs(x, 3, 3, _(b"status takes three arguments"))
463 463 # i18n: "status" is a keyword
464 464 baseerr = _(b"first argument to status must be a revision")
465 465 baserevspec = getstring(b, baseerr)
466 466 if not baserevspec:
467 467 raise error.ParseError(baseerr)
468 468 reverr = _(b"second argument to status must be a revision")
469 469 revspec = getstring(r, reverr)
470 470 if not revspec:
471 471 raise error.ParseError(reverr)
472 472 basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec])
473 473 mc = mctx.switch(basectx, ctx)
474 474 return getmatch(mc, x)
475 475
476 476
477 477 @predicate(b'subrepo([pattern])')
478 478 def subrepo(mctx, x):
479 479 """Subrepositories whose paths match the given pattern.
480 480 """
481 481 # i18n: "subrepo" is a keyword
482 482 getargs(x, 0, 1, _(b"subrepo takes at most one argument"))
483 483 ctx = mctx.ctx
484 484 sstate = ctx.substate
485 485 if x:
486 486 pat = getpattern(
487 487 x,
488 488 matchmod.allpatternkinds,
489 489 # i18n: "subrepo" is a keyword
490 490 _(b"subrepo requires a pattern or no arguments"),
491 491 )
492 492 fast = not matchmod.patkind(pat)
493 493 if fast:
494 494
495 495 def m(s):
496 496 return s == pat
497 497
498 498 else:
499 499 m = matchmod.match(ctx.repo().root, b'', [pat], ctx=ctx)
500 500 return mctx.predicate(
501 501 lambda f: f in sstate and m(f), predrepr=(b'subrepo(%r)', pat)
502 502 )
503 503 else:
504 504 return mctx.predicate(sstate.__contains__, predrepr=b'subrepo')
505 505
506 506
507 507 methods = {
508 508 b'withstatus': getmatchwithstatus,
509 509 b'string': stringmatch,
510 510 b'symbol': stringmatch,
511 511 b'kindpat': kindpatmatch,
512 512 b'patterns': patternsmatch,
513 513 b'and': andmatch,
514 514 b'or': ormatch,
515 515 b'minus': minusmatch,
516 516 b'list': listmatch,
517 517 b'not': notmatch,
518 518 b'func': func,
519 519 }
520 520
521 521
522 522 class matchctx(object):
523 def __init__(self, basectx, ctx, badfn=None):
523 def __init__(self, basectx, ctx, cwd, badfn=None):
524 524 self._basectx = basectx
525 525 self.ctx = ctx
526 526 self._badfn = badfn
527 527 self._match = None
528 528 self._status = None
529 self.cwd = cwd
529 530
530 531 def narrowed(self, match):
531 532 """Create matchctx for a sub-tree narrowed by the given matcher"""
532 mctx = matchctx(self._basectx, self.ctx, self._badfn)
533 mctx = matchctx(self._basectx, self.ctx, self.cwd, self._badfn)
533 534 mctx._match = match
534 535 # leave wider status which we don't have to care
535 536 mctx._status = self._status
536 537 return mctx
537 538
538 539 def switch(self, basectx, ctx):
539 mctx = matchctx(basectx, ctx, self._badfn)
540 mctx = matchctx(basectx, ctx, self.cwd, self._badfn)
540 541 mctx._match = self._match
541 542 return mctx
542 543
543 544 def withstatus(self, keys):
544 545 """Create matchctx which has precomputed status specified by the keys"""
545 mctx = matchctx(self._basectx, self.ctx, self._badfn)
546 mctx = matchctx(self._basectx, self.ctx, self.cwd, self._badfn)
546 547 mctx._match = self._match
547 548 mctx._buildstatus(keys)
548 549 return mctx
549 550
550 551 def _buildstatus(self, keys):
551 552 self._status = self._basectx.status(
552 553 self.ctx,
553 554 self._match,
554 555 listignored=b'ignored' in keys,
555 556 listclean=b'clean' in keys,
556 557 listunknown=b'unknown' in keys,
557 558 )
558 559
559 560 def status(self):
560 561 return self._status
561 562
562 563 def matcher(self, patterns):
563 return self.ctx.match(patterns, badfn=self._badfn)
564 return self.ctx.match(patterns, badfn=self._badfn, cwd=self.cwd)
564 565
565 566 def predicate(self, predfn, predrepr=None, cache=False):
566 567 """Create a matcher to select files by predfn(filename)"""
567 568 if cache:
568 569 predfn = util.cachefunc(predfn)
569 570 return matchmod.predicatematcher(
570 571 predfn, predrepr=predrepr, badfn=self._badfn
571 572 )
572 573
573 574 def fpredicate(self, predfn, predrepr=None, cache=False):
574 575 """Create a matcher to select files by predfn(fctx) at the current
575 576 revision
576 577
577 578 Missing files are ignored.
578 579 """
579 580 ctx = self.ctx
580 581 if ctx.rev() is None:
581 582
582 583 def fctxpredfn(f):
583 584 try:
584 585 fctx = ctx[f]
585 586 except error.LookupError:
586 587 return False
587 588 try:
588 589 fctx.audit()
589 590 except error.Abort:
590 591 return False
591 592 try:
592 593 return predfn(fctx)
593 594 except (IOError, OSError) as e:
594 595 # open()-ing a directory fails with EACCES on Windows
595 596 if e.errno in (
596 597 errno.ENOENT,
597 598 errno.EACCES,
598 599 errno.ENOTDIR,
599 600 errno.EISDIR,
600 601 ):
601 602 return False
602 603 raise
603 604
604 605 else:
605 606
606 607 def fctxpredfn(f):
607 608 try:
608 609 fctx = ctx[f]
609 610 except error.LookupError:
610 611 return False
611 612 return predfn(fctx)
612 613
613 614 return self.predicate(fctxpredfn, predrepr=predrepr, cache=cache)
614 615
615 616 def never(self):
616 617 """Create a matcher to select nothing"""
617 618 return matchmod.never(badfn=self._badfn)
618 619
619 620
620 def match(ctx, expr, badfn=None):
621 def match(ctx, cwd, expr, badfn=None):
621 622 """Create a matcher for a single fileset expression"""
622 623 tree = filesetlang.parse(expr)
623 624 tree = filesetlang.analyze(tree)
624 625 tree = filesetlang.optimize(tree)
625 mctx = matchctx(ctx.p1(), ctx, badfn=badfn)
626 mctx = matchctx(ctx.p1(), ctx, cwd, badfn=badfn)
626 627 return getmatch(mctx, tree)
627 628
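A hedged usage sketch of the updated signature (the repository path is
hypothetical; passing the repo root as ``cwd`` makes cwd-relative pattern
kinds inside the expression resolve against the root rather than the
process working directory)::

  from mercurial import fileset, hg, ui as uimod

  repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
  ctx = repo[None]
  m = fileset.match(ctx, repo.root, b"size('<5M')")
  print([f for f in ctx.walk(m)])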
628 629
629 630 def loadpredicate(ui, extname, registrarobj):
630 631 """Load fileset predicates from specified registrarobj
631 632 """
632 633 for name, func in pycompat.iteritems(registrarobj._table):
633 634 symbols[name] = func
634 635
635 636
636 637 # tell hggettext to extract docstrings from these functions:
637 638 i18nfunctions = symbols.values()
@@ -1,1619 +1,1622 b''
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import, print_function
9 9
10 10 import copy
11 11 import itertools
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .pycompat import open
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 pathutil,
21 21 policy,
22 22 pycompat,
23 23 util,
24 24 )
25 25 from .utils import stringutil
26 26
27 27 rustmod = policy.importrust('filepatterns')
28 28
29 29 allpatternkinds = (
30 30 b're',
31 31 b'glob',
32 32 b'path',
33 33 b'relglob',
34 34 b'relpath',
35 35 b'relre',
36 36 b'rootglob',
37 37 b'listfile',
38 38 b'listfile0',
39 39 b'set',
40 40 b'include',
41 41 b'subinclude',
42 42 b'rootfilesin',
43 43 )
44 44 cwdrelativepatternkinds = (b'relpath', b'glob')
45 45
46 46 propertycache = util.propertycache
47 47
48 48
49 49 def _rematcher(regex):
50 50 '''compile the regexp with the best available regexp engine and return a
51 51 matcher function'''
52 52 m = util.re.compile(regex)
53 53 try:
54 54 # slightly faster, provided by facebook's re2 bindings
55 55 return m.test_match
56 56 except AttributeError:
57 57 return m.match
58 58
59 59
60 def _expandsets(kindpats, ctx=None, listsubrepos=False, badfn=None):
60 def _expandsets(cwd, kindpats, ctx=None, listsubrepos=False, badfn=None):
61 61 '''Returns the kindpats list with the 'set' patterns expanded to matchers'''
62 62 matchers = []
63 63 other = []
64 64
65 65 for kind, pat, source in kindpats:
66 66 if kind == b'set':
67 67 if ctx is None:
68 68 raise error.ProgrammingError(
69 69 b"fileset expression with no context"
70 70 )
71 matchers.append(ctx.matchfileset(pat, badfn=badfn))
71 matchers.append(ctx.matchfileset(cwd, pat, badfn=badfn))
72 72
73 73 if listsubrepos:
74 74 for subpath in ctx.substate:
75 sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn)
75 sm = ctx.sub(subpath).matchfileset(cwd, pat, badfn=badfn)
76 76 pm = prefixdirmatcher(subpath, sm, badfn=badfn)
77 77 matchers.append(pm)
78 78
79 79 continue
80 80 other.append((kind, pat, source))
81 81 return matchers, other
82 82
83 83
84 84 def _expandsubinclude(kindpats, root):
85 85 '''Returns the list of subinclude matcher args and the kindpats without the
86 86 subincludes in it.'''
87 87 relmatchers = []
88 88 other = []
89 89
90 90 for kind, pat, source in kindpats:
91 91 if kind == b'subinclude':
92 92 sourceroot = pathutil.dirname(util.normpath(source))
93 93 pat = util.pconvert(pat)
94 94 path = pathutil.join(sourceroot, pat)
95 95
96 96 newroot = pathutil.dirname(path)
97 97 matcherargs = (newroot, b'', [], [b'include:%s' % path])
98 98
99 99 prefix = pathutil.canonpath(root, root, newroot)
100 100 if prefix:
101 101 prefix += b'/'
102 102 relmatchers.append((prefix, matcherargs))
103 103 else:
104 104 other.append((kind, pat, source))
105 105
106 106 return relmatchers, other
107 107
108 108
109 109 def _kindpatsalwaysmatch(kindpats):
110 110 """"Checks whether the kindspats match everything, as e.g.
111 111 'relpath:.' does.
112 112 """
113 113 for kind, pat, source in kindpats:
114 114 if pat != b'' or kind not in [b'relpath', b'glob']:
115 115 return False
116 116 return True
117 117
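For instance, assuming a Mercurial checkout on ``PYTHONPATH``::

  from mercurial import match as matchmod

  # 'relpath:.' normalizes to an empty relpath pattern, which matches all:
  assert matchmod._kindpatsalwaysmatch([(b'relpath', b'', b'')])
  assert not matchmod._kindpatsalwaysmatch([(b'glob', b'*.py', b'')])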
118 118
119 119 def _buildkindpatsmatcher(
120 matchercls, root, kindpats, ctx=None, listsubrepos=False, badfn=None
120 matchercls, root, cwd, kindpats, ctx=None, listsubrepos=False, badfn=None,
121 121 ):
122 122 matchers = []
123 123 fms, kindpats = _expandsets(
124 kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn
124 cwd, kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn,
125 125 )
126 126 if kindpats:
127 127 m = matchercls(root, kindpats, badfn=badfn)
128 128 matchers.append(m)
129 129 if fms:
130 130 matchers.extend(fms)
131 131 if not matchers:
132 132 return nevermatcher(badfn=badfn)
133 133 if len(matchers) == 1:
134 134 return matchers[0]
135 135 return unionmatcher(matchers)
136 136
137 137
138 138 def match(
139 139 root,
140 140 cwd,
141 141 patterns=None,
142 142 include=None,
143 143 exclude=None,
144 144 default=b'glob',
145 145 auditor=None,
146 146 ctx=None,
147 147 listsubrepos=False,
148 148 warn=None,
149 149 badfn=None,
150 150 icasefs=False,
151 151 ):
152 152 r"""build an object to match a set of file patterns
153 153
154 154 arguments:
155 155 root - the canonical root of the tree you're matching against
156 156 cwd - the current working directory, if relevant
157 157 patterns - patterns to find
158 158 include - patterns to include (unless they are excluded)
159 159 exclude - patterns to exclude (even if they are included)
160 160 default - if a pattern in patterns has no explicit type, assume this one
161 161 auditor - optional path auditor
162 162 ctx - optional changecontext
163 163 listsubrepos - if True, recurse into subrepositories
164 164 warn - optional function used for printing warnings
165 165 badfn - optional bad() callback for this matcher instead of the default
166 166 icasefs - make a matcher for wdir on case insensitive filesystems, which
167 167 normalizes the given patterns to the case in the filesystem
168 168
169 169 a pattern is one of:
170 170 'glob:<glob>' - a glob relative to cwd
171 171 're:<regexp>' - a regular expression
172 172 'path:<path>' - a path relative to repository root, which is matched
173 173 recursively
174 174 'rootfilesin:<path>' - a path relative to repository root, which is
175 175 matched non-recursively (will not match subdirectories)
176 176 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
177 177 'relpath:<path>' - a path relative to cwd
178 178 'relre:<regexp>' - a regexp that needn't match the start of a name
179 179 'set:<fileset>' - a fileset expression
180 180 'include:<path>' - a file of patterns to read and include
181 181 'subinclude:<path>' - a file of patterns to match against files under
182 182 the same directory
183 183 '<something>' - a pattern of the specified default type
184 184
185 185 >>> def _match(root, *args, **kwargs):
186 186 ... return match(util.localpath(root), *args, **kwargs)
187 187
188 188 Usually a patternmatcher is returned:
189 189 >>> _match(b'/foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
190 190 <patternmatcher patterns='.*\\.c$|foo/a(?:/|$)|[^/]*\\.py$'>
191 191
192 192 Combining 'patterns' with 'include' (resp. 'exclude') gives an
193 193 intersectionmatcher (resp. a differencematcher):
194 194 >>> type(_match(b'/foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
195 195 <class 'mercurial.match.intersectionmatcher'>
196 196 >>> type(_match(b'/foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
197 197 <class 'mercurial.match.differencematcher'>
198 198
199 199 Notice that, if 'patterns' is empty, an alwaysmatcher is returned:
200 200 >>> _match(b'/foo', b'.', [])
201 201 <alwaysmatcher>
202 202
203 203 The 'default' argument determines which kind of pattern is assumed if a
204 204 pattern has no prefix:
205 205 >>> _match(b'/foo', b'.', [b'.*\.c$'], default=b're')
206 206 <patternmatcher patterns='.*\\.c$'>
207 207 >>> _match(b'/foo', b'.', [b'main.py'], default=b'relpath')
208 208 <patternmatcher patterns='main\\.py(?:/|$)'>
209 209 >>> _match(b'/foo', b'.', [b'main.py'], default=b're')
210 210 <patternmatcher patterns='main.py'>
211 211
212 212 The primary use of matchers is to check whether a value (usually a file
213 213 name) matches against one of the patterns given at initialization. There
214 214 are two ways of doing this check.
215 215
216 216 >>> m = _match(b'/foo', b'', [b're:.*\.c$', b'relpath:a'])
217 217
218 218 1. Calling the matcher with a file name returns True if any pattern
219 219 matches that file name:
220 220 >>> m(b'a')
221 221 True
222 222 >>> m(b'main.c')
223 223 True
224 224 >>> m(b'test.py')
225 225 False
226 226
227 227 2. Using the exact() method only returns True if the file name matches one
228 228 of the exact patterns (i.e. not re: or glob: patterns):
229 229 >>> m.exact(b'a')
230 230 True
231 231 >>> m.exact(b'main.c')
232 232 False
233 233 """
234 234 assert os.path.isabs(root)
235 235 cwd = os.path.join(root, util.localpath(cwd))
236 236 normalize = _donormalize
237 237 if icasefs:
238 238 dirstate = ctx.repo().dirstate
239 239 dsnormalize = dirstate.normalize
240 240
241 241 def normalize(patterns, default, root, cwd, auditor, warn):
242 242 kp = _donormalize(patterns, default, root, cwd, auditor, warn)
243 243 kindpats = []
244 244 for kind, pats, source in kp:
245 245 if kind not in (b're', b'relre'): # regex can't be normalized
246 246 p = pats
247 247 pats = dsnormalize(pats)
248 248
249 249 # Preserve the original to handle a case only rename.
250 250 if p != pats and p in dirstate:
251 251 kindpats.append((kind, p, source))
252 252
253 253 kindpats.append((kind, pats, source))
254 254 return kindpats
255 255
256 256 if patterns:
257 257 kindpats = normalize(patterns, default, root, cwd, auditor, warn)
258 258 if _kindpatsalwaysmatch(kindpats):
259 259 m = alwaysmatcher(badfn)
260 260 else:
261 261 m = _buildkindpatsmatcher(
262 262 patternmatcher,
263 263 root,
264 cwd,
264 265 kindpats,
265 266 ctx=ctx,
266 267 listsubrepos=listsubrepos,
267 268 badfn=badfn,
268 269 )
269 270 else:
270 271 # It's a little strange that no patterns means to match everything.
271 272 # Consider changing this to match nothing (probably using nevermatcher).
272 273 m = alwaysmatcher(badfn)
273 274
274 275 if include:
275 276 kindpats = normalize(include, b'glob', root, cwd, auditor, warn)
276 277 im = _buildkindpatsmatcher(
277 278 includematcher,
278 279 root,
280 cwd,
279 281 kindpats,
280 282 ctx=ctx,
281 283 listsubrepos=listsubrepos,
282 284 badfn=None,
283 285 )
284 286 m = intersectmatchers(m, im)
285 287 if exclude:
286 288 kindpats = normalize(exclude, b'glob', root, cwd, auditor, warn)
287 289 em = _buildkindpatsmatcher(
288 290 includematcher,
289 291 root,
292 cwd,
290 293 kindpats,
291 294 ctx=ctx,
292 295 listsubrepos=listsubrepos,
293 296 badfn=None,
294 297 )
295 298 m = differencematcher(m, em)
296 299 return m
297 300
298 301
299 302 def exact(files, badfn=None):
300 303 return exactmatcher(files, badfn=badfn)
301 304
302 305
303 306 def always(badfn=None):
304 307 return alwaysmatcher(badfn)
305 308
306 309
307 310 def never(badfn=None):
308 311 return nevermatcher(badfn)
309 312
310 313
311 314 def badmatch(match, badfn):
312 315 """Make a copy of the given matcher, replacing its bad method with the given
313 316 one.
314 317 """
315 318 m = copy.copy(match)
316 319 m.bad = badfn
317 320 return m
318 321
319 322
320 323 def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
321 324 '''Convert 'kind:pat' from the patterns list to tuples with kind and
322 325 normalized and rooted patterns and with listfiles expanded.'''
323 326 kindpats = []
324 327 for kind, pat in [_patsplit(p, default) for p in patterns]:
325 328 if kind in cwdrelativepatternkinds:
326 329 pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
327 330 elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
328 331 pat = util.normpath(pat)
329 332 elif kind in (b'listfile', b'listfile0'):
330 333 try:
331 334 files = util.readfile(pat)
332 335 if kind == b'listfile0':
333 336 files = files.split(b'\0')
334 337 else:
335 338 files = files.splitlines()
336 339 files = [f for f in files if f]
337 340 except EnvironmentError:
338 341 raise error.Abort(_(b"unable to read file list (%s)") % pat)
339 342 for k, p, source in _donormalize(
340 343 files, default, root, cwd, auditor, warn
341 344 ):
342 345 kindpats.append((k, p, pat))
343 346 continue
344 347 elif kind == b'include':
345 348 try:
346 349 fullpath = os.path.join(root, util.localpath(pat))
347 350 includepats = readpatternfile(fullpath, warn)
348 351 for k, p, source in _donormalize(
349 352 includepats, default, root, cwd, auditor, warn
350 353 ):
351 354 kindpats.append((k, p, source or pat))
352 355 except error.Abort as inst:
353 356 raise error.Abort(
354 357 b'%s: %s'
355 358 % (pat, inst[0]) # pytype: disable=unsupported-operands
356 359 )
357 360 except IOError as inst:
358 361 if warn:
359 362 warn(
360 363 _(b"skipping unreadable pattern file '%s': %s\n")
361 364 % (pat, stringutil.forcebytestr(inst.strerror))
362 365 )
363 366 continue
364 367 # else: re or relre - which cannot be normalized
365 368 kindpats.append((kind, pat, b''))
366 369 return kindpats
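# Illustrative call, assuming root=b'/repo' and cwd=b'/repo/sub': 'glob'
# is a cwd-relative kind, so the pattern is first rooted against the cwd:
#
#   _donormalize([b'glob:*.c'], b'glob', b'/repo', b'/repo/sub')
#     -> [(b'glob', b'sub/*.c', b'')]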
367 370
368 371
369 372 class basematcher(object):
370 373 def __init__(self, badfn=None):
371 374 if badfn is not None:
372 375 self.bad = badfn
373 376
374 377 def __call__(self, fn):
375 378 return self.matchfn(fn)
376 379
377 380 # Callbacks related to how the matcher is used by dirstate.walk.
378 381 # Subscribers to these events must monkeypatch the matcher object.
379 382 def bad(self, f, msg):
380 383 '''Callback from dirstate.walk for each explicit file that can't be
381 384 found/accessed, with an error message.'''
382 385
383 386 # If traversedir is set, it will be called when a directory discovered
384 387 # by recursive traversal is visited.
385 388 traversedir = None
386 389
387 390 @propertycache
388 391 def _files(self):
389 392 return []
390 393
391 394 def files(self):
392 395 '''Explicitly listed files or patterns or roots:
393 396 if no patterns or .always(): empty list,
394 397 if exact: list exact files,
395 398 if not .anypats(): list all files and dirs,
396 399 else: optimal roots'''
397 400 return self._files
398 401
399 402 @propertycache
400 403 def _fileset(self):
401 404 return set(self._files)
402 405
403 406 def exact(self, f):
404 407 '''Returns True if f is in .files().'''
405 408 return f in self._fileset
406 409
407 410 def matchfn(self, f):
408 411 return False
409 412
410 413 def visitdir(self, dir):
411 414 '''Decides whether a directory should be visited based on whether it
412 415 has potential matches in it or one of its subdirectories. This is
413 416 based on the match's primary, included, and excluded patterns.
414 417
415 418 Returns the string 'all' if the given directory and all subdirectories
416 419 should be visited. Otherwise returns True or False indicating whether
417 420 the given directory should be visited.
418 421 '''
419 422 return True
420 423
421 424 def visitchildrenset(self, dir):
422 425 '''Decides whether a directory should be visited based on whether it
423 426 has potential matches in it or one of its subdirectories, and
424 427 potentially lists which subdirectories of that directory should be
425 428 visited. This is based on the match's primary, included, and excluded
426 429 patterns.
427 430
428 431 This function is very similar to 'visitdir', and the following mapping
429 432 can be applied:
430 433
431 434 visitdir | visitchildrenset
432 435 ----------+-------------------
433 436 False | set()
434 437 'all' | 'all'
435 438 True | 'this' OR non-empty set of subdirs -or files- to visit
436 439
437 440 Example:
438 441 Assuming matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
439 442 the following values (assuming the implementation of visitchildrenset
440 443 is capable of recognizing this; some implementations are not).
441 444
442 445 '' -> {'foo', 'qux'}
443 446 'baz' -> set()
444 447 'foo' -> {'bar'}
445 448 # Ideally this would be 'all', but since the prefix nature of matchers
446 449 # is applied to the entire matcher, we have to downgrade this to
447 450 # 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
448 451 # in.
449 452 'foo/bar' -> 'this'
450 453 'qux' -> 'this'
451 454
452 455 Important:
453 456 Most matchers do not know if they're representing files or
454 457 directories. They see ['path:dir/f'] and don't know whether 'f' is a
455 458 file or a directory, so visitchildrenset('dir') for most matchers will
456 459 return {'f'}, but if the matcher knows it's a file (like exactmatcher
457 460 does), it may return 'this'. Do not rely on a set return value to mean
458 461 that there are no files in this dir to investigate (or, equivalently,
459 462 that it will always return 'this' whenever there are files in 'dir' to
460 463 investigate).
461 464 '''
462 465 return b'this'
463 466
464 467 def always(self):
465 468 '''Matcher will match everything and .files() will be empty --
466 469 optimization might be possible.'''
467 470 return False
468 471
469 472 def isexact(self):
470 473 '''Matcher will match exactly the list of files in .files() --
471 474 optimization might be possible.'''
472 475 return False
473 476
474 477 def prefix(self):
475 478 '''Matcher will match the paths in .files() recursively --
476 479 optimization might be possible.'''
477 480 return False
478 481
479 482 def anypats(self):
480 483 '''None of .always(), .isexact(), and .prefix() is true --
481 484 optimizations will be difficult.'''
482 485 return not self.always() and not self.isexact() and not self.prefix()
483 486
484 487
485 488 class alwaysmatcher(basematcher):
486 489 '''Matches everything.'''
487 490
488 491 def __init__(self, badfn=None):
489 492 super(alwaysmatcher, self).__init__(badfn)
490 493
491 494 def always(self):
492 495 return True
493 496
494 497 def matchfn(self, f):
495 498 return True
496 499
497 500 def visitdir(self, dir):
498 501 return b'all'
499 502
500 503 def visitchildrenset(self, dir):
501 504 return b'all'
502 505
503 506 def __repr__(self):
504 507 return r'<alwaysmatcher>'
505 508
506 509
507 510 class nevermatcher(basematcher):
508 511 '''Matches nothing.'''
509 512
510 513 def __init__(self, badfn=None):
511 514 super(nevermatcher, self).__init__(badfn)
512 515
513 516 # It's a little weird to say that the nevermatcher is an exact matcher
514 517 # or a prefix matcher, but it seems to make sense to let callers take
515 518 # fast paths based on either. There will be no exact matches, nor any
516 519 # prefixes (files() returns []), so fast paths iterating over them should
517 520 # be efficient (and correct).
518 521 def isexact(self):
519 522 return True
520 523
521 524 def prefix(self):
522 525 return True
523 526
524 527 def visitdir(self, dir):
525 528 return False
526 529
527 530 def visitchildrenset(self, dir):
528 531 return set()
529 532
530 533 def __repr__(self):
531 534 return r'<nevermatcher>'
532 535
533 536
534 537 class predicatematcher(basematcher):
535 538 """A matcher adapter for a simple boolean function"""
536 539
537 540 def __init__(self, predfn, predrepr=None, badfn=None):
538 541 super(predicatematcher, self).__init__(badfn)
539 542 self.matchfn = predfn
540 543 self._predrepr = predrepr
541 544
542 545 @encoding.strmethod
543 546 def __repr__(self):
544 547 s = stringutil.buildrepr(self._predrepr) or pycompat.byterepr(
545 548 self.matchfn
546 549 )
547 550 return b'<predicatematcher pred=%s>' % s
548 551
549 552
550 553 class patternmatcher(basematcher):
551 554 r"""Matches a set of (kind, pat, source) against a 'root' directory.
552 555
553 556 >>> kindpats = [
554 557 ... (b're', br'.*\.c$', b''),
555 558 ... (b'path', b'foo/a', b''),
556 559 ... (b'relpath', b'b', b''),
557 560 ... (b'glob', b'*.h', b''),
558 561 ... ]
559 562 >>> m = patternmatcher(b'foo', kindpats)
560 563 >>> m(b'main.c') # matches re:.*\.c$
561 564 True
562 565 >>> m(b'b.txt')
563 566 False
564 567 >>> m(b'foo/a') # matches path:foo/a
565 568 True
566 569 >>> m(b'a') # does not match path:b, since 'root' is 'foo'
567 570 False
568 571 >>> m(b'b') # matches relpath:b, since 'root' is 'foo'
569 572 True
570 573 >>> m(b'lib.h') # matches glob:*.h
571 574 True
572 575
573 576 >>> m.files()
574 577 ['', 'foo/a', 'b', '']
575 578 >>> m.exact(b'foo/a')
576 579 True
577 580 >>> m.exact(b'b')
578 581 True
579 582 >>> m.exact(b'lib.h') # exact matches are for (rel)path kinds
580 583 False
581 584 """
582 585
583 586 def __init__(self, root, kindpats, badfn=None):
584 587 super(patternmatcher, self).__init__(badfn)
585 588
586 589 self._files = _explicitfiles(kindpats)
587 590 self._prefix = _prefix(kindpats)
588 591 self._pats, self.matchfn = _buildmatch(kindpats, b'$', root)
589 592
590 593 @propertycache
591 594 def _dirs(self):
592 595 return set(pathutil.dirs(self._fileset))
593 596
594 597 def visitdir(self, dir):
595 598 if self._prefix and dir in self._fileset:
596 599 return b'all'
597 600 return (
598 601 dir in self._fileset
599 602 or dir in self._dirs
600 603 or any(
601 604 parentdir in self._fileset
602 605 for parentdir in pathutil.finddirs(dir)
603 606 )
604 607 )
605 608
606 609 def visitchildrenset(self, dir):
607 610 ret = self.visitdir(dir)
608 611 if ret is True:
609 612 return b'this'
610 613 elif not ret:
611 614 return set()
612 615 assert ret == b'all'
613 616 return b'all'
614 617
615 618 def prefix(self):
616 619 return self._prefix
617 620
618 621 @encoding.strmethod
619 622 def __repr__(self):
620 623 return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
621 624
622 625
623 626 # This is basically a reimplementation of pathutil.dirs that stores the
624 627 # children instead of just a count of them, plus a small optional optimization
625 628 # to avoid some directories we don't need.
626 629 class _dirchildren(object):
627 630 def __init__(self, paths, onlyinclude=None):
628 631 self._dirs = {}
629 632 self._onlyinclude = onlyinclude or []
630 633 addpath = self.addpath
631 634 for f in paths:
632 635 addpath(f)
633 636
634 637 def addpath(self, path):
635 638 if path == b'':
636 639 return
637 640 dirs = self._dirs
638 641 findsplitdirs = _dirchildren._findsplitdirs
639 642 for d, b in findsplitdirs(path):
640 643 if d not in self._onlyinclude:
641 644 continue
642 645 dirs.setdefault(d, set()).add(b)
643 646
644 647 @staticmethod
645 648 def _findsplitdirs(path):
646 649 # yields (dirname, basename) tuples, walking back to the root. This is
647 650 # very similar to pathutil.finddirs, except:
648 651 # - produces a (dirname, basename) tuple, not just 'dirname'
649 652 # Unlike manifest._splittopdir, this does not suffix `dirname` with a
650 653 # slash.
651 654 oldpos = len(path)
652 655 pos = path.rfind(b'/')
653 656 while pos != -1:
654 657 yield path[:pos], path[pos + 1 : oldpos]
655 658 oldpos = pos
656 659 pos = path.rfind(b'/', 0, pos)
657 660 yield b'', path[:oldpos]
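# Worked example: _findsplitdirs(b'a/b/c') yields, in order,
# (b'a/b', b'c'), (b'a', b'b') and finally (b'', b'a').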
658 661
659 662 def get(self, path):
660 663 return self._dirs.get(path, set())
661 664
662 665
663 666 class includematcher(basematcher):
664 667 def __init__(self, root, kindpats, badfn=None):
665 668 super(includematcher, self).__init__(badfn)
666 669
667 670 self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root)
668 671 self._prefix = _prefix(kindpats)
669 672 roots, dirs, parents = _rootsdirsandparents(kindpats)
670 673 # roots are directories which are recursively included.
671 674 self._roots = set(roots)
672 675 # dirs are directories which are non-recursively included.
673 676 self._dirs = set(dirs)
674 677 # parents are directories which are non-recursively included because
675 678 # they are needed to get to items in _dirs or _roots.
676 679 self._parents = parents
677 680
678 681 def visitdir(self, dir):
679 682 if self._prefix and dir in self._roots:
680 683 return b'all'
681 684 return (
682 685 dir in self._roots
683 686 or dir in self._dirs
684 687 or dir in self._parents
685 688 or any(
686 689 parentdir in self._roots for parentdir in pathutil.finddirs(dir)
687 690 )
688 691 )
689 692
690 693 @propertycache
691 694 def _allparentschildren(self):
692 695 # It may seem odd that we add dirs, roots, and parents, and then
693 696 # restrict to only parents. This is to catch the case of:
694 697 # dirs = ['foo/bar']
695 698 # parents = ['foo']
696 699 # if we asked for the children of 'foo', but had only added
697 700 # self._parents, we wouldn't be able to respond with ['bar'].
698 701 return _dirchildren(
699 702 itertools.chain(self._dirs, self._roots, self._parents),
700 703 onlyinclude=self._parents,
701 704 )
702 705
703 706 def visitchildrenset(self, dir):
704 707 if self._prefix and dir in self._roots:
705 708 return b'all'
706 709 # Note: this does *not* include the 'dir in self._parents' case from
707 710 # visitdir, that's handled below.
708 711 if (
709 712 b'' in self._roots
710 713 or dir in self._roots
711 714 or dir in self._dirs
712 715 or any(
713 716 parentdir in self._roots for parentdir in pathutil.finddirs(dir)
714 717 )
715 718 ):
716 719 return b'this'
717 720
718 721 if dir in self._parents:
719 722 return self._allparentschildren.get(dir) or set()
720 723 return set()
721 724
722 725 @encoding.strmethod
723 726 def __repr__(self):
724 727 return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)
725 728
726 729
727 730 class exactmatcher(basematcher):
728 731 r'''Matches the input files exactly. They are interpreted as paths, not
729 732 patterns (so no kind-prefixes).
730 733
731 734 >>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
732 735 >>> m(b'a.txt')
733 736 True
734 737 >>> m(b'b.txt')
735 738 False
736 739
737 740 Input files that would be matched are exactly those returned by .files()
738 741 >>> m.files()
739 742 ['a.txt', 're:.*\\.c$']
740 743
741 744 So pattern 're:.*\.c$' is not considered as a regex, but as a file name
742 745 >>> m(b'main.c')
743 746 False
744 747 >>> m(br're:.*\.c$')
745 748 True
746 749 '''
747 750
748 751 def __init__(self, files, badfn=None):
749 752 super(exactmatcher, self).__init__(badfn)
750 753
751 754 if isinstance(files, list):
752 755 self._files = files
753 756 else:
754 757 self._files = list(files)
755 758
756 759 matchfn = basematcher.exact
757 760
758 761 @propertycache
759 762 def _dirs(self):
760 763 return set(pathutil.dirs(self._fileset))
761 764
762 765 def visitdir(self, dir):
763 766 return dir in self._dirs
764 767
765 768 def visitchildrenset(self, dir):
766 769 if not self._fileset or dir not in self._dirs:
767 770 return set()
768 771
769 772 candidates = self._fileset | self._dirs - {b''}
770 773 if dir != b'':
771 774 d = dir + b'/'
772 775 candidates = set(c[len(d) :] for c in candidates if c.startswith(d))
773 776 # self._dirs includes all of the directories, recursively, so if
774 777 # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
775 778 # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
776 779 # '/' in it, indicating it's for a subdir-of-a-subdir; the
777 780 # immediate subdir will be in there without a slash.
778 781 ret = {c for c in candidates if b'/' not in c}
779 782 # We really do not expect ret to be empty, since that would imply that
780 783 # there's something in _dirs that didn't have a file in _fileset.
781 784 assert ret
782 785 return ret
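# Worked example: with files [b'a/b/c.txt', b'a/d.txt'],
# visitchildrenset(b'') -> {b'a'}, visitchildrenset(b'a') -> {b'b', b'd.txt'}
# and visitchildrenset(b'a/b') -> {b'c.txt'}.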
783 786
784 787 def isexact(self):
785 788 return True
786 789
787 790 @encoding.strmethod
788 791 def __repr__(self):
789 792 return b'<exactmatcher files=%r>' % self._files
790 793
791 794
792 795 class differencematcher(basematcher):
793 796 '''Composes two matchers by matching if the first matches and the second
794 797 does not.
795 798
796 799 The second matcher's non-matching-attributes (bad, traversedir) are ignored.
797 800 '''
798 801
799 802 def __init__(self, m1, m2):
800 803 super(differencematcher, self).__init__()
801 804 self._m1 = m1
802 805 self._m2 = m2
803 806 self.bad = m1.bad
804 807 self.traversedir = m1.traversedir
805 808
806 809 def matchfn(self, f):
807 810 return self._m1(f) and not self._m2(f)
808 811
809 812 @propertycache
810 813 def _files(self):
811 814 if self.isexact():
812 815 return [f for f in self._m1.files() if self(f)]
813 816 # If m1 is not an exact matcher, we can't easily figure out the set of
814 817 # files, because its files() are not always files. For example, if
815 818 # m1 is "path:dir" and m2 is "rootfilesin:.", we don't
816 819 # want to remove "dir" from the set even though it would match m2,
817 820 # because the "dir" in m1 may not be a file.
818 821 return self._m1.files()
819 822
820 823 def visitdir(self, dir):
821 824 if self._m2.visitdir(dir) == b'all':
822 825 return False
823 826 elif not self._m2.visitdir(dir):
824 827 # m2 does not match dir, we can return 'all' here if possible
825 828 return self._m1.visitdir(dir)
826 829 return bool(self._m1.visitdir(dir))
827 830
828 831 def visitchildrenset(self, dir):
829 832 m2_set = self._m2.visitchildrenset(dir)
830 833 if m2_set == b'all':
831 834 return set()
832 835 m1_set = self._m1.visitchildrenset(dir)
833 836 # Possible values for m1: 'all', 'this', set(...), set()
834 837 # Possible values for m2: 'this', set(...), set()
835 838 # If m2 has nothing under here that we care about, return m1, even if
836 839 # it's 'all'. This is a change in behavior from visitdir, which would
837 840 # return True, not 'all', for some reason.
838 841 if not m2_set:
839 842 return m1_set
840 843 if m1_set in [b'all', b'this']:
841 844 # Never return 'all' here if m2_set is any kind of non-empty (either
842 845 # 'this' or set(foo)), since m2 might return set() for a
843 846 # subdirectory.
844 847 return b'this'
845 848 # Possible values for m1: set(...), set()
846 849 # Possible values for m2: 'this', set(...)
847 850 # We ignore m2's set results. They're possibly incorrect:
848 851 # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''):
849 852 # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
850 853 # return set(), which is *not* correct, we still need to visit 'dir'!
851 854 return m1_set
852 855
853 856 def isexact(self):
854 857 return self._m1.isexact()
855 858
856 859 @encoding.strmethod
857 860 def __repr__(self):
858 861 return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)
859 862
860 863
861 864 def intersectmatchers(m1, m2):
862 865 '''Composes two matchers by matching if both of them match.
863 866
864 867 The second matcher's non-matching-attributes (bad, traversedir) are ignored.
865 868 '''
866 869 if m1 is None or m2 is None:
867 870 return m1 or m2
868 871 if m1.always():
869 872 m = copy.copy(m2)
870 873 # TODO: Consider encapsulating these things in a class so there's only
871 874 # one thing to copy from m1.
872 875 m.bad = m1.bad
873 876 m.traversedir = m1.traversedir
874 877 return m
875 878 if m2.always():
876 879 m = copy.copy(m1)
877 880 return m
878 881 return intersectionmatcher(m1, m2)
879 882
880 883
881 884 class intersectionmatcher(basematcher):
882 885 def __init__(self, m1, m2):
883 886 super(intersectionmatcher, self).__init__()
884 887 self._m1 = m1
885 888 self._m2 = m2
886 889 self.bad = m1.bad
887 890 self.traversedir = m1.traversedir
888 891
889 892 @propertycache
890 893 def _files(self):
891 894 if self.isexact():
892 895 m1, m2 = self._m1, self._m2
893 896 if not m1.isexact():
894 897 m1, m2 = m2, m1
895 898 return [f for f in m1.files() if m2(f)]
896 899 # If neither m1 nor m2 is an exact matcher, we can't easily intersect
897 900 # the set of files, because their files() are not always files. For
898 901 # example, if intersecting a matcher "-I glob:foo.txt" with matcher of
899 902 # "path:dir2", we don't want to remove "dir2" from the set.
900 903 return self._m1.files() + self._m2.files()
901 904
902 905 def matchfn(self, f):
903 906 return self._m1(f) and self._m2(f)
904 907
905 908 def visitdir(self, dir):
906 909 visit1 = self._m1.visitdir(dir)
907 910 if visit1 == b'all':
908 911 return self._m2.visitdir(dir)
909 912 # bool() because visit1=True + visit2='all' should not be 'all'
910 913 return bool(visit1 and self._m2.visitdir(dir))
911 914
912 915 def visitchildrenset(self, dir):
913 916 m1_set = self._m1.visitchildrenset(dir)
914 917 if not m1_set:
915 918 return set()
916 919 m2_set = self._m2.visitchildrenset(dir)
917 920 if not m2_set:
918 921 return set()
919 922
920 923 if m1_set == b'all':
921 924 return m2_set
922 925 elif m2_set == b'all':
923 926 return m1_set
924 927
925 928 if m1_set == b'this' or m2_set == b'this':
926 929 return b'this'
927 930
928 931 assert isinstance(m1_set, set) and isinstance(m2_set, set)
929 932 return m1_set.intersection(m2_set)
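# For example, if m1 returns {b'foo', b'bar'} for some directory and m2
# returns {b'bar', b'baz'}, only b'bar' can contain files matching both
# matchers, so {b'bar'} is returned.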
930 933
931 934 def always(self):
932 935 return self._m1.always() and self._m2.always()
933 936
934 937 def isexact(self):
935 938 return self._m1.isexact() or self._m2.isexact()
936 939
937 940 @encoding.strmethod
938 941 def __repr__(self):
939 942 return b'<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)
940 943
941 944
942 945 class subdirmatcher(basematcher):
943 946 """Adapt a matcher to work on a subdirectory only.
944 947
945 948 The paths are remapped to remove/insert the path as needed:
946 949
947 950 >>> from . import pycompat
948 951 >>> m1 = match(util.localpath(b'/root'), b'', [b'a.txt', b'sub/b.txt'], auditor=lambda name: None)
949 952 >>> m2 = subdirmatcher(b'sub', m1)
950 953 >>> m2(b'a.txt')
951 954 False
952 955 >>> m2(b'b.txt')
953 956 True
954 957 >>> m2.matchfn(b'a.txt')
955 958 False
956 959 >>> m2.matchfn(b'b.txt')
957 960 True
958 961 >>> m2.files()
959 962 ['b.txt']
960 963 >>> m2.exact(b'b.txt')
961 964 True
962 965 >>> def bad(f, msg):
963 966 ... print(pycompat.sysstr(b"%s: %s" % (f, msg)))
964 967 >>> m1.bad = bad
965 968 >>> m2.bad(b'x.txt', b'No such file')
966 969 sub/x.txt: No such file
967 970 """
968 971
969 972 def __init__(self, path, matcher):
970 973 super(subdirmatcher, self).__init__()
971 974 self._path = path
972 975 self._matcher = matcher
973 976 self._always = matcher.always()
974 977
975 978 self._files = [
976 979 f[len(path) + 1 :]
977 980 for f in matcher._files
978 981 if f.startswith(path + b"/")
979 982 ]
980 983
981 984 # If the parent repo had a path to this subrepo and the matcher is
982 985 # a prefix matcher, this submatcher always matches.
983 986 if matcher.prefix():
984 987 self._always = any(f == path for f in matcher._files)
985 988
986 989 def bad(self, f, msg):
987 990 self._matcher.bad(self._path + b"/" + f, msg)
988 991
989 992 def matchfn(self, f):
990 993 # Some information is lost in the superclass's constructor, so we
991 994 # can not accurately create the matching function for the subdirectory
992 995 # from the inputs. Instead, we override matchfn() and visitdir() to
993 996 # call the original matcher with the subdirectory path prepended.
994 997 return self._matcher.matchfn(self._path + b"/" + f)
995 998
996 999 def visitdir(self, dir):
997 1000 if dir == b'':
998 1001 dir = self._path
999 1002 else:
1000 1003 dir = self._path + b"/" + dir
1001 1004 return self._matcher.visitdir(dir)
1002 1005
1003 1006 def visitchildrenset(self, dir):
1004 1007 if dir == b'':
1005 1008 dir = self._path
1006 1009 else:
1007 1010 dir = self._path + b"/" + dir
1008 1011 return self._matcher.visitchildrenset(dir)
1009 1012
1010 1013 def always(self):
1011 1014 return self._always
1012 1015
1013 1016 def prefix(self):
1014 1017 return self._matcher.prefix() and not self._always
1015 1018
1016 1019 @encoding.strmethod
1017 1020 def __repr__(self):
1018 1021 return b'<subdirmatcher path=%r, matcher=%r>' % (
1019 1022 self._path,
1020 1023 self._matcher,
1021 1024 )
1022 1025
1023 1026
1024 1027 class prefixdirmatcher(basematcher):
1025 1028 """Adapt a matcher to work on a parent directory.
1026 1029
1027 1030 The matcher's non-matching-attributes (bad, traversedir) are ignored.
1028 1031
1029 1032 The prefix path should usually be the relative path from the root of
1030 1033 this matcher to the root of the wrapped matcher.
1031 1034
1032 1035 >>> m1 = match(util.localpath(b'/root/d/e'), b'f', [b'../a.txt', b'b.txt'], auditor=lambda name: None)
1033 1036 >>> m2 = prefixdirmatcher(b'd/e', m1)
1034 1037 >>> m2(b'a.txt')
1035 1038 False
1036 1039 >>> m2(b'd/e/a.txt')
1037 1040 True
1038 1041 >>> m2(b'd/e/b.txt')
1039 1042 False
1040 1043 >>> m2.files()
1041 1044 ['d/e/a.txt', 'd/e/f/b.txt']
1042 1045 >>> m2.exact(b'd/e/a.txt')
1043 1046 True
1044 1047 >>> m2.visitdir(b'd')
1045 1048 True
1046 1049 >>> m2.visitdir(b'd/e')
1047 1050 True
1048 1051 >>> m2.visitdir(b'd/e/f')
1049 1052 True
1050 1053 >>> m2.visitdir(b'd/e/g')
1051 1054 False
1052 1055 >>> m2.visitdir(b'd/ef')
1053 1056 False
1054 1057 """
1055 1058
1056 1059 def __init__(self, path, matcher, badfn=None):
1057 1060 super(prefixdirmatcher, self).__init__(badfn)
1058 1061 if not path:
1059 1062 raise error.ProgrammingError(b'prefix path must not be empty')
1060 1063 self._path = path
1061 1064 self._pathprefix = path + b'/'
1062 1065 self._matcher = matcher
1063 1066
1064 1067 @propertycache
1065 1068 def _files(self):
1066 1069 return [self._pathprefix + f for f in self._matcher._files]
1067 1070
1068 1071 def matchfn(self, f):
1069 1072 if not f.startswith(self._pathprefix):
1070 1073 return False
1071 1074 return self._matcher.matchfn(f[len(self._pathprefix) :])
1072 1075
1073 1076 @propertycache
1074 1077 def _pathdirs(self):
1075 1078 return set(pathutil.finddirs(self._path))
1076 1079
1077 1080 def visitdir(self, dir):
1078 1081 if dir == self._path:
1079 1082 return self._matcher.visitdir(b'')
1080 1083 if dir.startswith(self._pathprefix):
1081 1084 return self._matcher.visitdir(dir[len(self._pathprefix) :])
1082 1085 return dir in self._pathdirs
1083 1086
1084 1087 def visitchildrenset(self, dir):
1085 1088 if dir == self._path:
1086 1089 return self._matcher.visitchildrenset(b'')
1087 1090 if dir.startswith(self._pathprefix):
1088 1091 return self._matcher.visitchildrenset(dir[len(self._pathprefix) :])
1089 1092 if dir in self._pathdirs:
1090 1093 return b'this'
1091 1094 return set()
1092 1095
1093 1096 def isexact(self):
1094 1097 return self._matcher.isexact()
1095 1098
1096 1099 def prefix(self):
1097 1100 return self._matcher.prefix()
1098 1101
1099 1102 @encoding.strmethod
1100 1103 def __repr__(self):
1101 1104 return b'<prefixdirmatcher path=%r, matcher=%r>' % (
1102 1105 pycompat.bytestr(self._path),
1103 1106 self._matcher,
1104 1107 )
1105 1108
1106 1109
1107 1110 class unionmatcher(basematcher):
1108 1111 """A matcher that is the union of several matchers.
1109 1112
1110 1113 The non-matching-attributes (bad, traversedir) are taken from the first
1111 1114 matcher.
1112 1115 """
1113 1116
1114 1117 def __init__(self, matchers):
1115 1118 m1 = matchers[0]
1116 1119 super(unionmatcher, self).__init__()
1117 1120 self.traversedir = m1.traversedir
1118 1121 self._matchers = matchers
1119 1122
1120 1123 def matchfn(self, f):
1121 1124 for match in self._matchers:
1122 1125 if match(f):
1123 1126 return True
1124 1127 return False
1125 1128
1126 1129 def visitdir(self, dir):
1127 1130 r = False
1128 1131 for m in self._matchers:
1129 1132 v = m.visitdir(dir)
1130 1133 if v == b'all':
1131 1134 return v
1132 1135 r |= v
1133 1136 return r
1134 1137
1135 1138 def visitchildrenset(self, dir):
1136 1139 r = set()
1137 1140 this = False
1138 1141 for m in self._matchers:
1139 1142 v = m.visitchildrenset(dir)
1140 1143 if not v:
1141 1144 continue
1142 1145 if v == b'all':
1143 1146 return v
1144 1147 if this or v == b'this':
1145 1148 this = True
1146 1149 # don't break, we might have an 'all' in here.
1147 1150 continue
1148 1151 assert isinstance(v, set)
1149 1152 r = r.union(v)
1150 1153 if this:
1151 1154 return b'this'
1152 1155 return r
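# For example, child sets {b'a'}, b'this' and set() combine to b'this';
# the loop only keeps going in case a later matcher returns b'all', which
# wins outright.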
1153 1156
1154 1157 @encoding.strmethod
1155 1158 def __repr__(self):
1156 1159 return b'<unionmatcher matchers=%r>' % self._matchers
1157 1160
1158 1161
1159 1162 def patkind(pattern, default=None):
1160 1163 r'''If pattern is 'kind:pat' with a known kind, return kind.
1161 1164
1162 1165 >>> patkind(br're:.*\.c$')
1163 1166 're'
1164 1167 >>> patkind(b'glob:*.c')
1165 1168 'glob'
1166 1169 >>> patkind(b'relpath:test.py')
1167 1170 'relpath'
1168 1171 >>> patkind(b'main.py')
1169 1172 >>> patkind(b'main.py', default=b're')
1170 1173 're'
1171 1174 '''
1172 1175 return _patsplit(pattern, default)[0]
1173 1176
1174 1177
1175 1178 def _patsplit(pattern, default):
1176 1179 """Split a string into the optional pattern kind prefix and the actual
1177 1180 pattern."""
1178 1181 if b':' in pattern:
1179 1182 kind, pat = pattern.split(b':', 1)
1180 1183 if kind in allpatternkinds:
1181 1184 return kind, pat
1182 1185 return default, pattern
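# For example: _patsplit(b'glob:*.c', b'relpath') == (b'glob', b'*.c'),
# _patsplit(b'main.c', b'relpath') == (b'relpath', b'main.c'), and an
# unknown prefix such as b'foo:bar' falls through to
# (b'relpath', b'foo:bar').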
1183 1186
1184 1187
1185 1188 def _globre(pat):
1186 1189 r'''Convert an extended glob string to a regexp string.
1187 1190
1188 1191 >>> from . import pycompat
1189 1192 >>> def bprint(s):
1190 1193 ... print(pycompat.sysstr(s))
1191 1194 >>> bprint(_globre(br'?'))
1192 1195 .
1193 1196 >>> bprint(_globre(br'*'))
1194 1197 [^/]*
1195 1198 >>> bprint(_globre(br'**'))
1196 1199 .*
1197 1200 >>> bprint(_globre(br'**/a'))
1198 1201 (?:.*/)?a
1199 1202 >>> bprint(_globre(br'a/**/b'))
1200 1203 a/(?:.*/)?b
1201 1204 >>> bprint(_globre(br'[a*?!^][^b][!c]'))
1202 1205 [a*?!^][\^b][^c]
1203 1206 >>> bprint(_globre(br'{a,b}'))
1204 1207 (?:a|b)
1205 1208 >>> bprint(_globre(br'.\*\?'))
1206 1209 \.\*\?
1207 1210 '''
1208 1211 i, n = 0, len(pat)
1209 1212 res = b''
1210 1213 group = 0
1211 1214 escape = util.stringutil.regexbytesescapemap.get
1212 1215
1213 1216 def peek():
1214 1217 return i < n and pat[i : i + 1]
1215 1218
1216 1219 while i < n:
1217 1220 c = pat[i : i + 1]
1218 1221 i += 1
1219 1222 if c not in b'*?[{},\\':
1220 1223 res += escape(c, c)
1221 1224 elif c == b'*':
1222 1225 if peek() == b'*':
1223 1226 i += 1
1224 1227 if peek() == b'/':
1225 1228 i += 1
1226 1229 res += b'(?:.*/)?'
1227 1230 else:
1228 1231 res += b'.*'
1229 1232 else:
1230 1233 res += b'[^/]*'
1231 1234 elif c == b'?':
1232 1235 res += b'.'
1233 1236 elif c == b'[':
1234 1237 j = i
1235 1238 if j < n and pat[j : j + 1] in b'!]':
1236 1239 j += 1
1237 1240 while j < n and pat[j : j + 1] != b']':
1238 1241 j += 1
1239 1242 if j >= n:
1240 1243 res += b'\\['
1241 1244 else:
1242 1245 stuff = pat[i:j].replace(b'\\', b'\\\\')
1243 1246 i = j + 1
1244 1247 if stuff[0:1] == b'!':
1245 1248 stuff = b'^' + stuff[1:]
1246 1249 elif stuff[0:1] == b'^':
1247 1250 stuff = b'\\' + stuff
1248 1251 res = b'%s[%s]' % (res, stuff)
1249 1252 elif c == b'{':
1250 1253 group += 1
1251 1254 res += b'(?:'
1252 1255 elif c == b'}' and group:
1253 1256 res += b')'
1254 1257 group -= 1
1255 1258 elif c == b',' and group:
1256 1259 res += b'|'
1257 1260 elif c == b'\\':
1258 1261 p = peek()
1259 1262 if p:
1260 1263 i += 1
1261 1264 res += escape(p, p)
1262 1265 else:
1263 1266 res += escape(c, c)
1264 1267 else:
1265 1268 res += escape(c, c)
1266 1269 return res
1267 1270
1268 1271
1269 1272 def _regex(kind, pat, globsuffix):
1270 1273 '''Convert a (normalized) pattern of any kind into a
1271 1274 regular expression.
1272 1275 globsuffix is appended to the regexp of globs.'''
1273 1276
1274 1277 if rustmod is not None:
1275 1278 try:
1276 1279 return rustmod.build_single_regex(kind, pat, globsuffix)
1277 1280 except rustmod.PatternError:
1278 1281 raise error.ProgrammingError(
1279 1282 b'not a regex pattern: %s:%s' % (kind, pat)
1280 1283 )
1281 1284
1282 1285 if not pat and kind in (b'glob', b'relpath'):
1283 1286 return b''
1284 1287 if kind == b're':
1285 1288 return pat
1286 1289 if kind in (b'path', b'relpath'):
1287 1290 if pat == b'.':
1288 1291 return b''
1289 1292 return util.stringutil.reescape(pat) + b'(?:/|$)'
1290 1293 if kind == b'rootfilesin':
1291 1294 if pat == b'.':
1292 1295 escaped = b''
1293 1296 else:
1294 1297 # Pattern is a directory name.
1295 1298 escaped = util.stringutil.reescape(pat) + b'/'
1296 1299 # Anything after the pattern must be a non-directory.
1297 1300 return escaped + b'[^/]+$'
1298 1301 if kind == b'relglob':
1299 1302 globre = _globre(pat)
1300 1303 if globre.startswith(b'[^/]*'):
1301 1304 # When pat has the form *XYZ (common), make the returned regex more
1302 1305 # legible by returning the regex for **XYZ instead of **/*XYZ.
1303 1306 return b'.*' + globre[len(b'[^/]*') :] + globsuffix
1304 1307 return b'(?:|.*/)' + globre + globsuffix
1305 1308 if kind == b'relre':
1306 1309 if pat.startswith(b'^'):
1307 1310 return pat
1308 1311 return b'.*' + pat
1309 1312 if kind in (b'glob', b'rootglob'):
1310 1313 return _globre(pat) + globsuffix
1311 1314 raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))
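# Illustrative conversions on the pure-Python path above (globsuffix=b'$'):
#   _regex(b'path', b'foo', b'$')        -> foo(?:/|$)
#   _regex(b'rootfilesin', b'foo', b'$') -> foo/[^/]+$
#   _regex(b'glob', b'*.c', b'$')        -> [^/]*\.c$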
1312 1315
1313 1316
1314 1317 def _buildmatch(kindpats, globsuffix, root):
1315 1318 '''Return regexp string and a matcher function for kindpats.
1316 1319 globsuffix is appended to the regexp of globs.'''
1317 1320 matchfuncs = []
1318 1321
1319 1322 subincludes, kindpats = _expandsubinclude(kindpats, root)
1320 1323 if subincludes:
1321 1324 submatchers = {}
1322 1325
1323 1326 def matchsubinclude(f):
1324 1327 for prefix, matcherargs in subincludes:
1325 1328 if f.startswith(prefix):
1326 1329 mf = submatchers.get(prefix)
1327 1330 if mf is None:
1328 1331 mf = match(*matcherargs)
1329 1332 submatchers[prefix] = mf
1330 1333
1331 1334 if mf(f[len(prefix) :]):
1332 1335 return True
1333 1336 return False
1334 1337
1335 1338 matchfuncs.append(matchsubinclude)
1336 1339
1337 1340 regex = b''
1338 1341 if kindpats:
1339 1342 if all(k == b'rootfilesin' for k, p, s in kindpats):
1340 1343 dirs = {p for k, p, s in kindpats}
1341 1344
1342 1345 def mf(f):
1343 1346 i = f.rfind(b'/')
1344 1347 if i >= 0:
1345 1348 dir = f[:i]
1346 1349 else:
1347 1350 dir = b'.'
1348 1351 return dir in dirs
1349 1352
1350 1353 regex = b'rootfilesin: %s' % stringutil.pprint(sorted(dirs))
1351 1354 matchfuncs.append(mf)
1352 1355 else:
1353 1356 regex, mf = _buildregexmatch(kindpats, globsuffix)
1354 1357 matchfuncs.append(mf)
1355 1358
1356 1359 if len(matchfuncs) == 1:
1357 1360 return regex, matchfuncs[0]
1358 1361 else:
1359 1362 return regex, lambda f: any(mf(f) for mf in matchfuncs)
1360 1363
1361 1364
1362 1365 MAX_RE_SIZE = 20000
1363 1366
1364 1367
1365 1368 def _joinregexes(regexps):
1366 1369 """gather multiple regular expressions into a single one"""
1367 1370 return b'|'.join(regexps)
1368 1371
1369 1372
1370 1373 def _buildregexmatch(kindpats, globsuffix):
1371 1374 """Build a match function from a list of kinds and kindpats,
1372 1375 return regexp string and a matcher function.
1373 1376
1374 1377 Test too large input
1375 1378 >>> _buildregexmatch([
1376 1379 ... (b'relglob', b'?' * MAX_RE_SIZE, b'')
1377 1380 ... ], b'$')
1378 1381 Traceback (most recent call last):
1379 1382 ...
1380 1383 Abort: matcher pattern is too long (20009 bytes)
1381 1384 """
1382 1385 try:
1383 1386 allgroups = []
1384 1387 regexps = [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
1385 1388 fullregexp = _joinregexes(regexps)
1386 1389
1387 1390 startidx = 0
1388 1391 groupsize = 0
1389 1392 for idx, r in enumerate(regexps):
1390 1393 piecesize = len(r)
1391 1394 if piecesize > MAX_RE_SIZE:
1392 1395 msg = _(b"matcher pattern is too long (%d bytes)") % piecesize
1393 1396 raise error.Abort(msg)
1394 1397 elif (groupsize + piecesize) > MAX_RE_SIZE:
1395 1398 group = regexps[startidx:idx]
1396 1399 allgroups.append(_joinregexes(group))
1397 1400 startidx = idx
1398 1401 groupsize = 0
1399 1402 groupsize += piecesize + 1
1400 1403
1401 1404 if startidx == 0:
1402 1405 matcher = _rematcher(fullregexp)
1403 1406 func = lambda s: bool(matcher(s))
1404 1407 else:
1405 1408 group = regexps[startidx:]
1406 1409 allgroups.append(_joinregexes(group))
1407 1410 allmatchers = [_rematcher(g) for g in allgroups]
1408 1411 func = lambda s: any(m(s) for m in allmatchers)
1409 1412 return fullregexp, func
1410 1413 except re.error:
1411 1414 for k, p, s in kindpats:
1412 1415 try:
1413 1416 _rematcher(_regex(k, p, globsuffix))
1414 1417 except re.error:
1415 1418 if s:
1416 1419 raise error.Abort(
1417 1420 _(b"%s: invalid pattern (%s): %s") % (s, k, p)
1418 1421 )
1419 1422 else:
1420 1423 raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p))
1421 1424 raise error.Abort(_(b"invalid pattern"))
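# Note on the grouping above: an ignore file with thousands of patterns can
# push the joined regex past MAX_RE_SIZE, so the loop splits the pieces into
# groups of at most MAX_RE_SIZE bytes each, compiles the groups separately,
# and the returned function answers any(m(s) for m in allmatchers).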
1422 1425
1423 1426
1424 1427 def _patternrootsanddirs(kindpats):
1425 1428 '''Returns roots and directories corresponding to each pattern.
1426 1429
1427 1430 This calculates the roots and directories exactly matching the patterns and
1428 1431 returns them as a (roots, dirs) tuple. It does not return other
1429 1432 directories which may also need to be considered, like the parent
1430 1433 directories.
1431 1434 '''
1432 1435 r = []
1433 1436 d = []
1434 1437 for kind, pat, source in kindpats:
1435 1438 if kind in (b'glob', b'rootglob'): # find the non-glob prefix
1436 1439 root = []
1437 1440 for p in pat.split(b'/'):
1438 1441 if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
1439 1442 break
1440 1443 root.append(p)
1441 1444 r.append(b'/'.join(root))
1442 1445 elif kind in (b'relpath', b'path'):
1443 1446 if pat == b'.':
1444 1447 pat = b''
1445 1448 r.append(pat)
1446 1449 elif kind in (b'rootfilesin',):
1447 1450 if pat == b'.':
1448 1451 pat = b''
1449 1452 d.append(pat)
1450 1453 else: # relglob, re, relre
1451 1454 r.append(b'')
1452 1455 return r, d
1453 1456
1454 1457
1455 1458 def _roots(kindpats):
1456 1459 '''Returns root directories to match recursively from the given patterns.'''
1457 1460 roots, dirs = _patternrootsanddirs(kindpats)
1458 1461 return roots
1459 1462
1460 1463
1461 1464 def _rootsdirsandparents(kindpats):
1462 1465 '''Returns roots and exact directories from patterns.
1463 1466
1464 1467 `roots` are directories to match recursively, `dirs` should
1465 1468 be matched non-recursively, and `parents` are the implicitly required
1466 1469 directories to walk to items in either roots or dirs.
1467 1470
1468 1471 Returns a tuple of (roots, dirs, parents).
1469 1472
1470 1473 >>> r = _rootsdirsandparents(
1471 1474 ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
1472 1475 ... (b'glob', b'g*', b'')])
1473 1476 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1474 1477 (['g/h', 'g/h', ''], []) ['', 'g']
1475 1478 >>> r = _rootsdirsandparents(
1476 1479 ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
1477 1480 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1478 1481 ([], ['g/h', '']) ['', 'g']
1479 1482 >>> r = _rootsdirsandparents(
1480 1483 ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
1481 1484 ... (b'path', b'', b'')])
1482 1485 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1483 1486 (['r', 'p/p', ''], []) ['', 'p']
1484 1487 >>> r = _rootsdirsandparents(
1485 1488 ... [(b'relglob', b'rg*', b''), (b're', b're/', b''),
1486 1489 ... (b'relre', b'rr', b'')])
1487 1490 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1488 1491 (['', '', ''], []) ['']
1489 1492 '''
1490 1493 r, d = _patternrootsanddirs(kindpats)
1491 1494
1492 1495 p = set()
1493 1496 # Add the parents as non-recursive/exact directories, since they must be
1494 1497 # scanned to get to either the roots or the other exact directories.
1495 1498 p.update(pathutil.dirs(d))
1496 1499 p.update(pathutil.dirs(r))
1497 1500
1498 1501 # FIXME: all uses of this function convert these to sets, do so before
1499 1502 # returning.
1500 1503 # FIXME: all uses of this function do not need anything in 'roots' and
1501 1504 # 'dirs' to also be in 'parents', consider removing them before returning.
1502 1505 return r, d, p
1503 1506
1504 1507
1505 1508 def _explicitfiles(kindpats):
1506 1509 '''Returns the potential explicit filenames from the patterns.
1507 1510
1508 1511 >>> _explicitfiles([(b'path', b'foo/bar', b'')])
1509 1512 ['foo/bar']
1510 1513 >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
1511 1514 []
1512 1515 '''
1513 1516 # Keep only the pattern kinds where one can specify filenames (vs only
1514 1517 # directory names).
1515 1518 filable = [kp for kp in kindpats if kp[0] not in (b'rootfilesin',)]
1516 1519 return _roots(filable)
1517 1520
1518 1521
1519 1522 def _prefix(kindpats):
1520 1523 '''Whether all the patterns match a prefix (i.e. recursively)'''
1521 1524 for kind, pat, source in kindpats:
1522 1525 if kind not in (b'path', b'relpath'):
1523 1526 return False
1524 1527 return True
1525 1528
1526 1529
1527 1530 _commentre = None
1528 1531
1529 1532
1530 1533 def readpatternfile(filepath, warn, sourceinfo=False):
1531 1534 '''parse a pattern file, returning a list of
1532 1535 patterns. These patterns should be given to compile()
1533 1536 to be validated and converted into a match function.
1534 1537
1535 1538 trailing white space is dropped.
1536 1539 the escape character is backslash.
1537 1540 comments start with #.
1538 1541 empty lines are skipped.
1539 1542
1540 1543 lines can be of the following formats:
1541 1544
1542 1545 syntax: regexp # defaults following lines to non-rooted regexps
1543 1546 syntax: glob # defaults following lines to non-rooted globs
1544 1547 re:pattern # non-rooted regular expression
1545 1548 glob:pattern # non-rooted glob
1546 1549 rootglob:pat # rooted glob (same root as ^ in regexps)
1547 1550 pattern # pattern of the current default type
1548 1551
1549 1552 if sourceinfo is set, returns a list of tuples:
1550 1553 (pattern, lineno, originalline).
1551 1554 This is useful to debug ignore patterns.
1552 1555 '''
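# A small example body for such a file (hypothetical patterns; glob syntax
# selected on the first line):
#
#   syntax: glob
#   *.pyc
#   build/**
#   rootglob:docs/*.tmp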
1553 1556
1554 1557 if rustmod is not None:
1555 1558 result, warnings = rustmod.read_pattern_file(
1556 1559 filepath, bool(warn), sourceinfo,
1557 1560 )
1558 1561
1559 1562 for warning_params in warnings:
1560 1563 # Can't be easily emitted from Rust, because it would require
1561 1564 # a mechanism for both gettext and calling the `warn` function.
1562 1565 warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
1563 1566
1564 1567 return result
1565 1568
1566 1569 syntaxes = {
1567 1570 b're': b'relre:',
1568 1571 b'regexp': b'relre:',
1569 1572 b'glob': b'relglob:',
1570 1573 b'rootglob': b'rootglob:',
1571 1574 b'include': b'include',
1572 1575 b'subinclude': b'subinclude',
1573 1576 }
1574 1577 syntax = b'relre:'
1575 1578 patterns = []
1576 1579
1577 1580 fp = open(filepath, b'rb')
1578 1581 for lineno, line in enumerate(util.iterfile(fp), start=1):
1579 1582 if b"#" in line:
1580 1583 global _commentre
1581 1584 if not _commentre:
1582 1585 _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
1583 1586 # remove comments prefixed by an even number of escapes
1584 1587 m = _commentre.search(line)
1585 1588 if m:
1586 1589 line = line[: m.end(1)]
1587 1590 # fixup properly escaped comments that survived the above
1588 1591 line = line.replace(b"\\#", b"#")
1589 1592 line = line.rstrip()
1590 1593 if not line:
1591 1594 continue
1592 1595
1593 1596 if line.startswith(b'syntax:'):
1594 1597 s = line[7:].strip()
1595 1598 try:
1596 1599 syntax = syntaxes[s]
1597 1600 except KeyError:
1598 1601 if warn:
1599 1602 warn(
1600 1603 _(b"%s: ignoring invalid syntax '%s'\n") % (filepath, s)
1601 1604 )
1602 1605 continue
1603 1606
1604 1607 linesyntax = syntax
1605 1608 for s, rels in pycompat.iteritems(syntaxes):
1606 1609 if line.startswith(rels):
1607 1610 linesyntax = rels
1608 1611 line = line[len(rels) :]
1609 1612 break
1610 1613 elif line.startswith(s + b':'):
1611 1614 linesyntax = rels
1612 1615 line = line[len(s) + 1 :]
1613 1616 break
1614 1617 if sourceinfo:
1615 1618 patterns.append((linesyntax + line, lineno, line))
1616 1619 else:
1617 1620 patterns.append(linesyntax + line)
1618 1621 fp.close()
1619 1622 return patterns
@@ -1,2052 +1,2052 b''
1 1 # subrepo.py - sub-repository classes and factory
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import stat
16 16 import subprocess
17 17 import sys
18 18 import tarfile
19 19 import xml.dom.minidom
20 20
21 21 from .i18n import _
22 22 from . import (
23 23 cmdutil,
24 24 encoding,
25 25 error,
26 26 exchange,
27 27 logcmdutil,
28 28 match as matchmod,
29 29 node,
30 30 pathutil,
31 31 phases,
32 32 pycompat,
33 33 scmutil,
34 34 subrepoutil,
35 35 util,
36 36 vfs as vfsmod,
37 37 )
38 38 from .utils import (
39 39 dateutil,
40 40 procutil,
41 41 stringutil,
42 42 )
43 43
44 44 hg = None
45 45 reporelpath = subrepoutil.reporelpath
46 46 subrelpath = subrepoutil.subrelpath
47 47 _abssource = subrepoutil._abssource
48 48 propertycache = util.propertycache
49 49
50 50
51 51 def _expandedabspath(path):
52 52 '''
53 53 get a path or URL; if it is a path, expand it and return an absolute path
54 54 '''
55 55 expandedpath = util.urllocalpath(util.expandpath(path))
56 56 u = util.url(expandedpath)
57 57 if not u.scheme:
58 58 path = util.normpath(os.path.abspath(u.path))
59 59 return path
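# For instance, a local path like b'~/repo' is expanded and absolutized,
# while anything with a scheme, e.g. b'http://example.com/repo', is
# returned unchanged.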
60 60
61 61
62 62 def _getstorehashcachename(remotepath):
63 63 '''get a unique filename for the store hash cache of a remote repository'''
64 64 return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12]
65 65
66 66
67 67 class SubrepoAbort(error.Abort):
68 68 """Exception class used to avoid handling a subrepo error more than once"""
69 69
70 70 def __init__(self, *args, **kw):
71 71 self.subrepo = kw.pop('subrepo', None)
72 72 self.cause = kw.pop('cause', None)
73 73 error.Abort.__init__(self, *args, **kw)
74 74
75 75
76 76 def annotatesubrepoerror(func):
77 77 def decoratedmethod(self, *args, **kargs):
78 78 try:
79 79 res = func(self, *args, **kargs)
80 80 except SubrepoAbort as ex:
81 81 # This exception has already been handled
82 82 raise ex
83 83 except error.Abort as ex:
84 84 subrepo = subrelpath(self)
85 85 errormsg = (
86 86 stringutil.forcebytestr(ex)
87 87 + b' '
88 88 + _(b'(in subrepository "%s")') % subrepo
89 89 )
90 90 # avoid handling this exception by raising a SubrepoAbort exception
91 91 raise SubrepoAbort(
92 92 errormsg, hint=ex.hint, subrepo=subrepo, cause=sys.exc_info()
93 93 )
94 94 return res
95 95
96 96 return decoratedmethod
97 97
98 98
99 99 def _updateprompt(ui, sub, dirty, local, remote):
100 100 if dirty:
101 101 msg = _(
102 102 b' subrepository sources for %s differ\n'
103 103 b'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
104 104 b'what do you want to do?'
105 105 b'$$ &Local $$ &Remote'
106 106 ) % (subrelpath(sub), local, remote)
107 107 else:
108 108 msg = _(
109 109 b' subrepository sources for %s differ (in checked out '
110 110 b'version)\n'
111 111 b'you can use (l)ocal source (%s) or (r)emote source (%s).\n'
112 112 b'what do you want to do?'
113 113 b'$$ &Local $$ &Remote'
114 114 ) % (subrelpath(sub), local, remote)
115 115 return ui.promptchoice(msg, 0)
116 116
117 117
118 118 def _sanitize(ui, vfs, ignore):
119 119 for dirname, dirs, names in vfs.walk():
120 120 for i, d in enumerate(dirs):
121 121 if d.lower() == ignore:
122 122 del dirs[i]
123 123 break
124 124 if vfs.basename(dirname).lower() != b'.hg':
125 125 continue
126 126 for f in names:
127 127 if f.lower() == b'hgrc':
128 128 ui.warn(
129 129 _(
130 130 b"warning: removing potentially hostile 'hgrc' "
131 131 b"in '%s'\n"
132 132 )
133 133 % vfs.join(dirname)
134 134 )
135 135 vfs.unlink(vfs.reljoin(dirname, f))
136 136
137 137
138 138 def _auditsubrepopath(repo, path):
139 139 # sanity check for potentially unsafe paths such as '~' and '$FOO'
140 140 if path.startswith(b'~') or b'$' in path or util.expandpath(path) != path:
141 141 raise error.Abort(
142 142 _(b'subrepo path contains illegal component: %s') % path
143 143 )
144 144 # auditor doesn't check if the path itself is a symlink
145 145 pathutil.pathauditor(repo.root)(path)
146 146 if repo.wvfs.islink(path):
147 147 raise error.Abort(_(b"subrepo '%s' traverses symbolic link") % path)
148 148
149 149
150 150 SUBREPO_ALLOWED_DEFAULTS = {
151 151 b'hg': True,
152 152 b'git': False,
153 153 b'svn': False,
154 154 }
155 155
156 156
157 157 def _checktype(ui, kind):
158 158 # subrepos.allowed is a master kill switch. If disabled, subrepos are
159 159 # disabled period.
160 160 if not ui.configbool(b'subrepos', b'allowed', True):
161 161 raise error.Abort(
162 162 _(b'subrepos not enabled'),
163 163 hint=_(b"see 'hg help config.subrepos' for details"),
164 164 )
165 165
166 166 default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False)
167 167 if not ui.configbool(b'subrepos', b'%s:allowed' % kind, default):
168 168 raise error.Abort(
169 169 _(b'%s subrepos not allowed') % kind,
170 170 hint=_(b"see 'hg help config.subrepos' for details"),
171 171 )
172 172
173 173 if kind not in types:
174 174 raise error.Abort(_(b'unknown subrepo type %s') % kind)
175 175
176 176
177 177 def subrepo(ctx, path, allowwdir=False, allowcreate=True):
178 178 """return instance of the right subrepo class for subrepo in path"""
179 179 # subrepo inherently violates our import layering rules
180 180 # because it wants to make repo objects from deep inside the stack
181 181 # so we manually delay the circular imports to not break
182 182 # scripts that don't use our demand-loading
183 183 global hg
184 184 from . import hg as h
185 185
186 186 hg = h
187 187
188 188 repo = ctx.repo()
189 189 _auditsubrepopath(repo, path)
190 190 state = ctx.substate[path]
191 191 _checktype(repo.ui, state[2])
192 192 if allowwdir:
193 193 state = (state[0], ctx.subrev(path), state[2])
194 194 return types[state[2]](ctx, path, state[:2], allowcreate)
195 195
196 196
197 197 def nullsubrepo(ctx, path, pctx):
198 198 """return an empty subrepo in pctx for the extant subrepo in ctx"""
199 199 # subrepo inherently violates our import layering rules
200 200 # because it wants to make repo objects from deep inside the stack
201 201 # so we manually delay the circular imports to not break
202 202 # scripts that don't use our demand-loading
203 203 global hg
204 204 from . import hg as h
205 205
206 206 hg = h
207 207
208 208 repo = ctx.repo()
209 209 _auditsubrepopath(repo, path)
210 210 state = ctx.substate[path]
211 211 _checktype(repo.ui, state[2])
212 212 subrev = b''
213 213 if state[2] == b'hg':
214 214 subrev = b"0" * 40
215 215 return types[state[2]](pctx, path, (state[0], subrev), True)
216 216
217 217
218 218 # subrepo classes need to implement the following abstract class:
219 219
220 220
221 221 class abstractsubrepo(object):
222 222 def __init__(self, ctx, path):
223 223 """Initialize abstractsubrepo part
224 224
225 225 ``ctx`` is the context referring to this subrepository in the
226 226 parent repository.
227 227
228 228 ``path`` is the path to this subrepository as seen from
229 229 innermost repository.
230 230 """
231 231 self.ui = ctx.repo().ui
232 232 self._ctx = ctx
233 233 self._path = path
234 234
235 235 def addwebdirpath(self, serverpath, webconf):
236 236 """Add the hgwebdir entries for this subrepo, and any of its subrepos.
237 237
238 238 ``serverpath`` is the path component of the URL for this repo.
239 239
240 240 ``webconf`` is the dictionary of hgwebdir entries.
241 241 """
242 242 pass
243 243
244 244 def storeclean(self, path):
245 245 """
246 246 returns true if the repository has not changed since it was last
247 247 cloned from or pushed to a given repository.
248 248 """
249 249 return False
250 250
251 251 def dirty(self, ignoreupdate=False, missing=False):
252 252 """returns true if the dirstate of the subrepo is dirty or does not
253 253 match current stored state. If ignoreupdate is true, only check
254 254 whether the subrepo has uncommitted changes in its dirstate. If missing
255 255 is true, check for deleted files.
256 256 """
257 257 raise NotImplementedError
258 258
259 259 def dirtyreason(self, ignoreupdate=False, missing=False):
260 260 """return reason string if it is ``dirty()``
261 261
262 262 Returned string should have enough information for the message
263 263 of exception.
264 264
265 265 This returns None, otherwise.
266 266 """
267 267 if self.dirty(ignoreupdate=ignoreupdate, missing=missing):
268 268 return _(b'uncommitted changes in subrepository "%s"') % subrelpath(
269 269 self
270 270 )
271 271
272 272 def bailifchanged(self, ignoreupdate=False, hint=None):
273 273 """raise Abort if subrepository is ``dirty()``
274 274 """
275 275 dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate, missing=True)
276 276 if dirtyreason:
277 277 raise error.Abort(dirtyreason, hint=hint)
278 278
279 279 def basestate(self):
280 280 """current working directory base state, disregarding .hgsubstate
281 281 state and working directory modifications"""
282 282 raise NotImplementedError
283 283
284 284 def checknested(self, path):
285 285 """check if path is a subrepository within this repository"""
286 286 return False
287 287
288 288 def commit(self, text, user, date):
289 289 """commit the current changes to the subrepo with the given
290 290 log message. Use given user and date if possible. Return the
291 291 new state of the subrepo.
292 292 """
293 293 raise NotImplementedError
294 294
295 295 def phase(self, state):
296 296 """returns phase of specified state in the subrepository.
297 297 """
298 298 return phases.public
299 299
300 300 def remove(self):
301 301 """remove the subrepo
302 302
303 303 (should verify the dirstate is not dirty first)
304 304 """
305 305 raise NotImplementedError
306 306
307 307 def get(self, state, overwrite=False):
308 308 """run whatever commands are needed to put the subrepo into
309 309 this state
310 310 """
311 311 raise NotImplementedError
312 312
313 313 def merge(self, state):
314 314 """merge currently-saved state with the new state."""
315 315 raise NotImplementedError
316 316
317 317 def push(self, opts):
318 318 """perform whatever action is analogous to 'hg push'
319 319
320 320 This may be a no-op on some systems.
321 321 """
322 322 raise NotImplementedError
323 323
324 324 def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
325 325 return []
326 326
327 327 def addremove(self, matcher, prefix, uipathfn, opts):
328 328 self.ui.warn(b"%s: %s" % (prefix, _(b"addremove is not supported")))
329 329 return 1
330 330
331 331 def cat(self, match, fm, fntemplate, prefix, **opts):
332 332 return 1
333 333
334 334 def status(self, rev2, **opts):
335 335 return scmutil.status([], [], [], [], [], [], [])
336 336
337 337 def diff(self, ui, diffopts, node2, match, prefix, **opts):
338 338 pass
339 339
340 340 def outgoing(self, ui, dest, opts):
341 341 return 1
342 342
343 343 def incoming(self, ui, source, opts):
344 344 return 1
345 345
346 346 def files(self):
347 347 """return filename iterator"""
348 348 raise NotImplementedError
349 349
350 350 def filedata(self, name, decode):
351 351 """return file data, optionally passed through repo decoders"""
352 352 raise NotImplementedError
353 353
354 354 def fileflags(self, name):
355 355 """return file flags"""
356 356 return b''
357 357
358 def matchfileset(self, expr, badfn=None):
358 def matchfileset(self, cwd, expr, badfn=None):
359 359 """Resolve the fileset expression for this repo"""
360 360 return matchmod.never(badfn=badfn)
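# The `cwd` argument added here is the directory against which relative
# paths inside the fileset expression are resolved; concrete subclasses
# are expected to thread it through to the underlying repository context.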
361 361
362 362 def printfiles(self, ui, m, uipathfn, fm, fmt, subrepos):
363 363 """handle the files command for this subrepo"""
364 364 return 1
365 365
366 366 def archive(self, archiver, prefix, match=None, decode=True):
367 367 if match is not None:
368 368 files = [f for f in self.files() if match(f)]
369 369 else:
370 370 files = self.files()
371 371 total = len(files)
372 372 relpath = subrelpath(self)
373 373 progress = self.ui.makeprogress(
374 374 _(b'archiving (%s)') % relpath, unit=_(b'files'), total=total
375 375 )
376 376 progress.update(0)
377 377 for name in files:
378 378 flags = self.fileflags(name)
379 379 mode = b'x' in flags and 0o755 or 0o644
380 380 symlink = b'l' in flags
381 381 archiver.addfile(
382 382 prefix + name, mode, symlink, self.filedata(name, decode)
383 383 )
384 384 progress.increment()
385 385 progress.complete()
386 386 return total
387 387
388 388 def walk(self, match):
389 389 '''
390 390 walk recursively through the directory tree, finding all files
391 391 matched by the match function
392 392 '''
393 393
394 394 def forget(self, match, prefix, uipathfn, dryrun, interactive):
395 395 return ([], [])
396 396
397 397 def removefiles(
398 398 self,
399 399 matcher,
400 400 prefix,
401 401 uipathfn,
402 402 after,
403 403 force,
404 404 subrepos,
405 405 dryrun,
406 406 warnings,
407 407 ):
408 408 """remove the matched files from the subrepository and the filesystem,
409 409 possibly by force and/or after the file has been removed from the
410 410 filesystem. Return 0 on success, 1 on any warning.
411 411 """
412 412 warnings.append(
413 413 _(b"warning: removefiles not implemented (%s)") % self._path
414 414 )
415 415 return 1
416 416
417 417 def revert(self, substate, *pats, **opts):
418 418 self.ui.warn(
419 419 _(b'%s: reverting %s subrepos is unsupported\n')
420 420 % (substate[0], substate[2])
421 421 )
422 422 return []
423 423
424 424 def shortid(self, revid):
425 425 return revid
426 426
427 427 def unshare(self):
428 428 '''
429 429 convert this repository from shared to normal storage.
430 430 '''
431 431
432 432 def verify(self, onpush=False):
433 433         """verify that the revision of this repository held in `_state` is
434 434 present and not hidden. Return 0 on success or warning, 1 on any
435 435 error. In the case of ``onpush``, warnings or errors will raise an
436 436 exception if the result of pushing would be a broken remote repository.
437 437 """
438 438 return 0
439 439
440 440 @propertycache
441 441 def wvfs(self):
442 442 """return vfs to access the working directory of this subrepository
443 443 """
444 444 return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))
445 445
446 446 @propertycache
447 447 def _relpath(self):
448 448 """return path to this subrepository as seen from outermost repository
449 449 """
450 450 return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
451 451
452 452
453 453 class hgsubrepo(abstractsubrepo):
454 454 def __init__(self, ctx, path, state, allowcreate):
455 455 super(hgsubrepo, self).__init__(ctx, path)
456 456 self._state = state
457 457 r = ctx.repo()
458 458 root = r.wjoin(util.localpath(path))
459 459 create = allowcreate and not r.wvfs.exists(b'%s/.hg' % path)
460 460 # repository constructor does expand variables in path, which is
461 461         # unsafe since the subrepo path might come from an untrusted source.
462 462 if os.path.realpath(util.expandpath(root)) != root:
463 463 raise error.Abort(
464 464 _(b'subrepo path contains illegal component: %s') % path
465 465 )
466 466 self._repo = hg.repository(r.baseui, root, create=create)
467 467 if self._repo.root != root:
468 468 raise error.ProgrammingError(
469 469 b'failed to reject unsafe subrepo '
470 470 b'path: %s (expanded to %s)' % (root, self._repo.root)
471 471 )
472 472
473 473 # Propagate the parent's --hidden option
474 474 if r is r.unfiltered():
475 475 self._repo = self._repo.unfiltered()
476 476
477 477 self.ui = self._repo.ui
478 478 for s, k in [(b'ui', b'commitsubrepos')]:
479 479 v = r.ui.config(s, k)
480 480 if v:
481 481 self.ui.setconfig(s, k, v, b'subrepo')
482 482 # internal config: ui._usedassubrepo
483 483 self.ui.setconfig(b'ui', b'_usedassubrepo', b'True', b'subrepo')
484 484 self._initrepo(r, state[0], create)
485 485
486 486 @annotatesubrepoerror
487 487 def addwebdirpath(self, serverpath, webconf):
488 488 cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)
489 489
490 490 def storeclean(self, path):
491 491 with self._repo.lock():
492 492 return self._storeclean(path)
493 493
494 494 def _storeclean(self, path):
495 495 clean = True
496 496 itercache = self._calcstorehash(path)
497 497 for filehash in self._readstorehashcache(path):
498 498 if filehash != next(itercache, None):
499 499 clean = False
500 500 break
501 501 if clean:
502 502             # if itercache is not exhausted at this point, the current
503 503             # store hash has more lines than the cached one, so not clean
504 504 clean = next(itercache, None) is None
505 505 return clean
506 506
507 507 def _calcstorehash(self, remotepath):
508 508 '''calculate a unique "store hash"
509 509
510 510         This method is used to detect when there are changes that may
511 511 require a push to a given remote path.'''
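        # Hedged illustration of the lines this generator yields (the path
        # and hashes below are made up for the example):
        #   # /absolute/path/to/remote
        #   bookmarks = da39a3ee5e6b4b0d3255bfef95601890afd80709
        #   store/phaseroots = ...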
512 512 # sort the files that will be hashed in increasing (likely) file size
513 513 filelist = (b'bookmarks', b'store/phaseroots', b'store/00changelog.i')
514 514 yield b'# %s\n' % _expandedabspath(remotepath)
515 515 vfs = self._repo.vfs
516 516 for relname in filelist:
517 517 filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
518 518 yield b'%s = %s\n' % (relname, filehash)
519 519
520 520 @propertycache
521 521 def _cachestorehashvfs(self):
522 522 return vfsmod.vfs(self._repo.vfs.join(b'cache/storehash'))
523 523
524 524 def _readstorehashcache(self, remotepath):
525 525 '''read the store hash cache for a given remote repository'''
526 526 cachefile = _getstorehashcachename(remotepath)
527 527 return self._cachestorehashvfs.tryreadlines(cachefile, b'r')
528 528
529 529 def _cachestorehash(self, remotepath):
530 530 '''cache the current store hash
531 531
532 532 Each remote repo requires its own store hash cache, because a subrepo
533 533 store may be "clean" versus a given remote repo, but not versus another
534 534 '''
535 535 cachefile = _getstorehashcachename(remotepath)
536 536 with self._repo.lock():
537 537 storehash = list(self._calcstorehash(remotepath))
538 538 vfs = self._cachestorehashvfs
539 539 vfs.writelines(cachefile, storehash, mode=b'wb', notindexed=True)
540 540
541 541 def _getctx(self):
542 542 '''fetch the context for this subrepo revision, possibly a workingctx
543 543 '''
544 544 if self._ctx.rev() is None:
545 545 return self._repo[None] # workingctx if parent is workingctx
546 546 else:
547 547 rev = self._state[1]
548 548 return self._repo[rev]
549 549
550 550 @annotatesubrepoerror
551 551 def _initrepo(self, parentrepo, source, create):
552 552 self._repo._subparent = parentrepo
553 553 self._repo._subsource = source
554 554
555 555 if create:
556 556 lines = [b'[paths]\n']
557 557
558 558 def addpathconfig(key, value):
559 559 if value:
560 560 lines.append(b'%s = %s\n' % (key, value))
561 561 self.ui.setconfig(b'paths', key, value, b'subrepo')
562 562
563 563 defpath = _abssource(self._repo, abort=False)
564 564 defpushpath = _abssource(self._repo, True, abort=False)
565 565 addpathconfig(b'default', defpath)
566 566 if defpath != defpushpath:
567 567 addpathconfig(b'default-push', defpushpath)
568 568
569 569 self._repo.vfs.write(b'hgrc', util.tonativeeol(b''.join(lines)))
570 570
571 571 @annotatesubrepoerror
572 572 def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
573 573 return cmdutil.add(
574 574 ui, self._repo, match, prefix, uipathfn, explicitonly, **opts
575 575 )
576 576
577 577 @annotatesubrepoerror
578 578 def addremove(self, m, prefix, uipathfn, opts):
579 579 # In the same way as sub directories are processed, once in a subrepo,
580 580         # always enter any of its subrepos. Don't corrupt the options that will
581 581 # be used to process sibling subrepos however.
582 582 opts = copy.copy(opts)
583 583 opts[b'subrepos'] = True
584 584 return scmutil.addremove(self._repo, m, prefix, uipathfn, opts)
585 585
586 586 @annotatesubrepoerror
587 587 def cat(self, match, fm, fntemplate, prefix, **opts):
588 588 rev = self._state[1]
589 589 ctx = self._repo[rev]
590 590 return cmdutil.cat(
591 591 self.ui, self._repo, ctx, match, fm, fntemplate, prefix, **opts
592 592 )
593 593
594 594 @annotatesubrepoerror
595 595 def status(self, rev2, **opts):
596 596 try:
597 597 rev1 = self._state[1]
598 598 ctx1 = self._repo[rev1]
599 599 ctx2 = self._repo[rev2]
600 600 return self._repo.status(ctx1, ctx2, **opts)
601 601 except error.RepoLookupError as inst:
602 602 self.ui.warn(
603 603 _(b'warning: error "%s" in subrepository "%s"\n')
604 604 % (inst, subrelpath(self))
605 605 )
606 606 return scmutil.status([], [], [], [], [], [], [])
607 607
608 608 @annotatesubrepoerror
609 609 def diff(self, ui, diffopts, node2, match, prefix, **opts):
610 610 try:
611 611 node1 = node.bin(self._state[1])
612 612 # We currently expect node2 to come from substate and be
613 613 # in hex format
614 614 if node2 is not None:
615 615 node2 = node.bin(node2)
616 616 logcmdutil.diffordiffstat(
617 617 ui,
618 618 self._repo,
619 619 diffopts,
620 620 node1,
621 621 node2,
622 622 match,
623 623 prefix=prefix,
624 624 listsubrepos=True,
625 625 **opts
626 626 )
627 627 except error.RepoLookupError as inst:
628 628 self.ui.warn(
629 629 _(b'warning: error "%s" in subrepository "%s"\n')
630 630 % (inst, subrelpath(self))
631 631 )
632 632
633 633 @annotatesubrepoerror
634 634 def archive(self, archiver, prefix, match=None, decode=True):
635 635 self._get(self._state + (b'hg',))
636 636 files = self.files()
637 637 if match:
638 638 files = [f for f in files if match(f)]
639 639 rev = self._state[1]
640 640 ctx = self._repo[rev]
641 641 scmutil.prefetchfiles(
642 642 self._repo, [ctx.rev()], scmutil.matchfiles(self._repo, files)
643 643 )
644 644 total = abstractsubrepo.archive(self, archiver, prefix, match)
645 645 for subpath in ctx.substate:
646 646 s = subrepo(ctx, subpath, True)
647 647 submatch = matchmod.subdirmatcher(subpath, match)
648 648 subprefix = prefix + subpath + b'/'
649 649 total += s.archive(archiver, subprefix, submatch, decode)
650 650 return total
651 651
652 652 @annotatesubrepoerror
653 653 def dirty(self, ignoreupdate=False, missing=False):
654 654 r = self._state[1]
655 655 if r == b'' and not ignoreupdate: # no state recorded
656 656 return True
657 657 w = self._repo[None]
658 658 if r != w.p1().hex() and not ignoreupdate:
659 659 # different version checked out
660 660 return True
661 661 return w.dirty(missing=missing) # working directory changed
662 662
663 663 def basestate(self):
664 664 return self._repo[b'.'].hex()
665 665
666 666 def checknested(self, path):
667 667 return self._repo._checknested(self._repo.wjoin(path))
668 668
669 669 @annotatesubrepoerror
670 670 def commit(self, text, user, date):
671 671 # don't bother committing in the subrepo if it's only been
672 672 # updated
673 673 if not self.dirty(True):
674 674 return self._repo[b'.'].hex()
675 675 self.ui.debug(b"committing subrepo %s\n" % subrelpath(self))
676 676 n = self._repo.commit(text, user, date)
677 677 if not n:
678 678 return self._repo[b'.'].hex() # different version checked out
679 679 return node.hex(n)
680 680
681 681 @annotatesubrepoerror
682 682 def phase(self, state):
683 683 return self._repo[state or b'.'].phase()
684 684
685 685 @annotatesubrepoerror
686 686 def remove(self):
687 687 # we can't fully delete the repository as it may contain
688 688 # local-only history
689 689 self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
690 690 hg.clean(self._repo, node.nullid, False)
691 691
692 692 def _get(self, state):
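        # Returns True when the target revision is already present locally
        # (nothing fetched), False after it had to be cloned or pulled in.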
693 693 source, revision, kind = state
694 694 parentrepo = self._repo._subparent
695 695
696 696 if revision in self._repo.unfiltered():
697 697             # Allow shared subrepos tracked at null to set up the sharedpath
698 698 if len(self._repo) != 0 or not parentrepo.shared():
699 699 return True
700 700 self._repo._subsource = source
701 701 srcurl = _abssource(self._repo)
702 702
703 703 # Defer creating the peer until after the status message is logged, in
704 704 # case there are network problems.
705 705 getpeer = lambda: hg.peer(self._repo, {}, srcurl)
706 706
707 707 if len(self._repo) == 0:
708 708 # use self._repo.vfs instead of self.wvfs to remove .hg only
709 709 self._repo.vfs.rmtree()
710 710
711 711 # A remote subrepo could be shared if there is a local copy
712 712 # relative to the parent's share source. But clone pooling doesn't
713 713 # assemble the repos in a tree, so that can't be consistently done.
714 714 # A simpler option is for the user to configure clone pooling, and
715 715 # work with that.
716 716 if parentrepo.shared() and hg.islocal(srcurl):
717 717 self.ui.status(
718 718 _(b'sharing subrepo %s from %s\n')
719 719 % (subrelpath(self), srcurl)
720 720 )
721 721 shared = hg.share(
722 722 self._repo._subparent.baseui,
723 723 getpeer(),
724 724 self._repo.root,
725 725 update=False,
726 726 bookmarks=False,
727 727 )
728 728 self._repo = shared.local()
729 729 else:
730 730                 # TODO: find a common place for this and the similar code in
731 731                 # the share.py wrap of the clone command.
732 732 if parentrepo.shared():
733 733 pool = self.ui.config(b'share', b'pool')
734 734 if pool:
735 735 pool = util.expandpath(pool)
736 736
737 737 shareopts = {
738 738 b'pool': pool,
739 739 b'mode': self.ui.config(b'share', b'poolnaming'),
740 740 }
741 741 else:
742 742 shareopts = {}
743 743
744 744 self.ui.status(
745 745 _(b'cloning subrepo %s from %s\n')
746 746 % (subrelpath(self), util.hidepassword(srcurl))
747 747 )
748 748 other, cloned = hg.clone(
749 749 self._repo._subparent.baseui,
750 750 {},
751 751 getpeer(),
752 752 self._repo.root,
753 753 update=False,
754 754 shareopts=shareopts,
755 755 )
756 756 self._repo = cloned.local()
757 757 self._initrepo(parentrepo, source, create=True)
758 758 self._cachestorehash(srcurl)
759 759 else:
760 760 self.ui.status(
761 761 _(b'pulling subrepo %s from %s\n')
762 762 % (subrelpath(self), util.hidepassword(srcurl))
763 763 )
764 764 cleansub = self.storeclean(srcurl)
765 765 exchange.pull(self._repo, getpeer())
766 766 if cleansub:
767 767 # keep the repo clean after pull
768 768 self._cachestorehash(srcurl)
769 769 return False
770 770
771 771 @annotatesubrepoerror
772 772 def get(self, state, overwrite=False):
773 773 inrepo = self._get(state)
774 774 source, revision, kind = state
775 775 repo = self._repo
776 776 repo.ui.debug(b"getting subrepo %s\n" % self._path)
777 777 if inrepo:
778 778 urepo = repo.unfiltered()
779 779 ctx = urepo[revision]
780 780 if ctx.hidden():
781 781 urepo.ui.warn(
782 782 _(b'revision %s in subrepository "%s" is hidden\n')
783 783 % (revision[0:12], self._path)
784 784 )
785 785 repo = urepo
786 786 hg.updaterepo(repo, revision, overwrite)
787 787
788 788 @annotatesubrepoerror
789 789 def merge(self, state):
790 790 self._get(state)
791 791 cur = self._repo[b'.']
792 792 dst = self._repo[state[1]]
793 793 anc = dst.ancestor(cur)
794 794
795 795 def mergefunc():
796 796 if anc == cur and dst.branch() == cur.branch():
797 797 self.ui.debug(
798 798 b'updating subrepository "%s"\n' % subrelpath(self)
799 799 )
800 800 hg.update(self._repo, state[1])
801 801 elif anc == dst:
802 802 self.ui.debug(
803 803 b'skipping subrepository "%s"\n' % subrelpath(self)
804 804 )
805 805 else:
806 806 self.ui.debug(
807 807 b'merging subrepository "%s"\n' % subrelpath(self)
808 808 )
809 809 hg.merge(self._repo, state[1], remind=False)
810 810
811 811 wctx = self._repo[None]
812 812 if self.dirty():
813 813 if anc != dst:
814 814 if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
815 815 mergefunc()
816 816 else:
817 817 mergefunc()
818 818 else:
819 819 mergefunc()
820 820
821 821 @annotatesubrepoerror
822 822 def push(self, opts):
823 823 force = opts.get(b'force')
824 824 newbranch = opts.get(b'new_branch')
825 825 ssh = opts.get(b'ssh')
826 826
827 827 # push subrepos depth-first for coherent ordering
828 828 c = self._repo[b'.']
829 829 subs = c.substate # only repos that are committed
830 830 for s in sorted(subs):
831 831 if c.sub(s).push(opts) == 0:
832 832 return False
833 833
834 834 dsturl = _abssource(self._repo, True)
835 835 if not force:
836 836 if self.storeclean(dsturl):
837 837 self.ui.status(
838 838 _(b'no changes made to subrepo %s since last push to %s\n')
839 839 % (subrelpath(self), util.hidepassword(dsturl))
840 840 )
841 841 return None
842 842 self.ui.status(
843 843 _(b'pushing subrepo %s to %s\n')
844 844 % (subrelpath(self), util.hidepassword(dsturl))
845 845 )
846 846 other = hg.peer(self._repo, {b'ssh': ssh}, dsturl)
847 847 res = exchange.push(self._repo, other, force, newbranch=newbranch)
848 848
849 849 # the repo is now clean
850 850 self._cachestorehash(dsturl)
851 851 return res.cgresult
852 852
853 853 @annotatesubrepoerror
854 854 def outgoing(self, ui, dest, opts):
855 855 if b'rev' in opts or b'branch' in opts:
856 856 opts = copy.copy(opts)
857 857 opts.pop(b'rev', None)
858 858 opts.pop(b'branch', None)
859 859 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
860 860
861 861 @annotatesubrepoerror
862 862 def incoming(self, ui, source, opts):
863 863 if b'rev' in opts or b'branch' in opts:
864 864 opts = copy.copy(opts)
865 865 opts.pop(b'rev', None)
866 866 opts.pop(b'branch', None)
867 867 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
868 868
869 869 @annotatesubrepoerror
870 870 def files(self):
871 871 rev = self._state[1]
872 872 ctx = self._repo[rev]
873 873 return ctx.manifest().keys()
874 874
875 875 def filedata(self, name, decode):
876 876 rev = self._state[1]
877 877 data = self._repo[rev][name].data()
878 878 if decode:
879 879 data = self._repo.wwritedata(name, data)
880 880 return data
881 881
882 882 def fileflags(self, name):
883 883 rev = self._state[1]
884 884 ctx = self._repo[rev]
885 885 return ctx.flags(name)
886 886
887 887 @annotatesubrepoerror
888 888 def printfiles(self, ui, m, uipathfn, fm, fmt, subrepos):
889 889 # If the parent context is a workingctx, use the workingctx here for
890 890 # consistency.
891 891 if self._ctx.rev() is None:
892 892 ctx = self._repo[None]
893 893 else:
894 894 rev = self._state[1]
895 895 ctx = self._repo[rev]
896 896 return cmdutil.files(ui, ctx, m, uipathfn, fm, fmt, subrepos)
897 897
898 898 @annotatesubrepoerror
899 def matchfileset(self, expr, badfn=None):
899 def matchfileset(self, cwd, expr, badfn=None):
900 900 if self._ctx.rev() is None:
901 901 ctx = self._repo[None]
902 902 else:
903 903 rev = self._state[1]
904 904 ctx = self._repo[rev]
905 905
906 matchers = [ctx.matchfileset(expr, badfn=badfn)]
906 matchers = [ctx.matchfileset(cwd, expr, badfn=badfn)]
907 907
908 908 for subpath in ctx.substate:
909 909 sub = ctx.sub(subpath)
910 910
911 911 try:
912 sm = sub.matchfileset(expr, badfn=badfn)
912 sm = sub.matchfileset(cwd, expr, badfn=badfn)
913 913 pm = matchmod.prefixdirmatcher(subpath, sm, badfn=badfn)
914 914 matchers.append(pm)
915 915 except error.LookupError:
916 916 self.ui.status(
917 917 _(b"skipping missing subrepository: %s\n")
918 918 % self.wvfs.reljoin(reporelpath(self), subpath)
919 919 )
920 920 if len(matchers) == 1:
921 921 return matchers[0]
922 922 return matchmod.unionmatcher(matchers)
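        # Hedged illustration: with a subrepo checked out at b'lib', a file
        # b'a.py' matched by the subrepo's own matcher is reported to the
        # parent as b'lib/a.py' via prefixdirmatcher, and unionmatcher then
        # ORs that with the parent repo's own fileset matches.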
923 923
924 924 def walk(self, match):
925 925 ctx = self._repo[None]
926 926 return ctx.walk(match)
927 927
928 928 @annotatesubrepoerror
929 929 def forget(self, match, prefix, uipathfn, dryrun, interactive):
930 930 return cmdutil.forget(
931 931 self.ui,
932 932 self._repo,
933 933 match,
934 934 prefix,
935 935 uipathfn,
936 936 True,
937 937 dryrun=dryrun,
938 938 interactive=interactive,
939 939 )
940 940
941 941 @annotatesubrepoerror
942 942 def removefiles(
943 943 self,
944 944 matcher,
945 945 prefix,
946 946 uipathfn,
947 947 after,
948 948 force,
949 949 subrepos,
950 950 dryrun,
951 951 warnings,
952 952 ):
953 953 return cmdutil.remove(
954 954 self.ui,
955 955 self._repo,
956 956 matcher,
957 957 prefix,
958 958 uipathfn,
959 959 after,
960 960 force,
961 961 subrepos,
962 962 dryrun,
963 963 )
964 964
965 965 @annotatesubrepoerror
966 966 def revert(self, substate, *pats, **opts):
967 967 # reverting a subrepo is a 2 step process:
968 968         # 1. if no_backup is not set, revert all modified
969 969 # files inside the subrepo
970 970 # 2. update the subrepo to the revision specified in
971 971 # the corresponding substate dictionary
972 972 self.ui.status(_(b'reverting subrepo %s\n') % substate[0])
973 973 if not opts.get('no_backup'):
974 974 # Revert all files on the subrepo, creating backups
975 975 # Note that this will not recursively revert subrepos
976 976 # We could do it if there was a set:subrepos() predicate
977 977 opts = opts.copy()
978 978 opts['date'] = None
979 979 opts['rev'] = substate[1]
980 980
981 981 self.filerevert(*pats, **opts)
982 982
983 983 # Update the repo to the revision specified in the given substate
984 984 if not opts.get('dry_run'):
985 985 self.get(substate, overwrite=True)
986 986
987 987 def filerevert(self, *pats, **opts):
988 988 ctx = self._repo[opts['rev']]
989 989 parents = self._repo.dirstate.parents()
990 990 if opts.get('all'):
991 991 pats = [b'set:modified()']
992 992 else:
993 993 pats = []
994 994 cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
995 995
996 996 def shortid(self, revid):
997 997 return revid[:12]
998 998
999 999 @annotatesubrepoerror
1000 1000 def unshare(self):
1001 1001 # subrepo inherently violates our import layering rules
1002 1002 # because it wants to make repo objects from deep inside the stack
1003 1003 # so we manually delay the circular imports to not break
1004 1004 # scripts that don't use our demand-loading
1005 1005 global hg
1006 1006 from . import hg as h
1007 1007
1008 1008 hg = h
1009 1009
1010 1010 # Nothing prevents a user from sharing in a repo, and then making that a
1011 1011 # subrepo. Alternately, the previous unshare attempt may have failed
1012 1012 # part way through. So recurse whether or not this layer is shared.
1013 1013 if self._repo.shared():
1014 1014 self.ui.status(_(b"unsharing subrepo '%s'\n") % self._relpath)
1015 1015
1016 1016 hg.unshare(self.ui, self._repo)
1017 1017
1018 1018 def verify(self, onpush=False):
1019 1019 try:
1020 1020 rev = self._state[1]
1021 1021 ctx = self._repo.unfiltered()[rev]
1022 1022 if ctx.hidden():
1023 1023 # Since hidden revisions aren't pushed/pulled, it seems worth an
1024 1024 # explicit warning.
1025 1025 msg = _(b"subrepo '%s' is hidden in revision %s") % (
1026 1026 self._relpath,
1027 1027 node.short(self._ctx.node()),
1028 1028 )
1029 1029
1030 1030 if onpush:
1031 1031 raise error.Abort(msg)
1032 1032 else:
1033 1033 self._repo.ui.warn(b'%s\n' % msg)
1034 1034 return 0
1035 1035 except error.RepoLookupError:
1036 1036 # A missing subrepo revision may be a case of needing to pull it, so
1037 1037 # don't treat this as an error for `hg verify`.
1038 1038 msg = _(b"subrepo '%s' not found in revision %s") % (
1039 1039 self._relpath,
1040 1040 node.short(self._ctx.node()),
1041 1041 )
1042 1042
1043 1043 if onpush:
1044 1044 raise error.Abort(msg)
1045 1045 else:
1046 1046 self._repo.ui.warn(b'%s\n' % msg)
1047 1047 return 0
1048 1048
1049 1049 @propertycache
1050 1050 def wvfs(self):
1051 1051 """return own wvfs for efficiency and consistency
1052 1052 """
1053 1053 return self._repo.wvfs
1054 1054
1055 1055 @propertycache
1056 1056 def _relpath(self):
1057 1057 """return path to this subrepository as seen from outermost repository
1058 1058 """
1059 1059 # Keep consistent dir separators by avoiding vfs.join(self._path)
1060 1060 return reporelpath(self._repo)
1061 1061
1062 1062
1063 1063 class svnsubrepo(abstractsubrepo):
1064 1064 def __init__(self, ctx, path, state, allowcreate):
1065 1065 super(svnsubrepo, self).__init__(ctx, path)
1066 1066 self._state = state
1067 1067 self._exe = procutil.findexe(b'svn')
1068 1068 if not self._exe:
1069 1069 raise error.Abort(
1070 1070 _(b"'svn' executable not found for subrepo '%s'") % self._path
1071 1071 )
1072 1072
1073 1073 def _svncommand(self, commands, filename=b'', failok=False):
1074 1074 cmd = [self._exe]
1075 1075 extrakw = {}
1076 1076 if not self.ui.interactive():
1077 1077 # Making stdin be a pipe should prevent svn from behaving
1078 1078 # interactively even if we can't pass --non-interactive.
1079 1079 extrakw['stdin'] = subprocess.PIPE
1080 1080 # Starting in svn 1.5 --non-interactive is a global flag
1081 1081 # instead of being per-command, but we need to support 1.4 so
1082 1082 # we have to be intelligent about what commands take
1083 1083 # --non-interactive.
1084 1084 if commands[0] in (b'update', b'checkout', b'commit'):
1085 1085 cmd.append(b'--non-interactive')
1086 1086 cmd.extend(commands)
1087 1087 if filename is not None:
1088 1088 path = self.wvfs.reljoin(
1089 1089 self._ctx.repo().origroot, self._path, filename
1090 1090 )
1091 1091 cmd.append(path)
1092 1092 env = dict(encoding.environ)
1093 1093 # Avoid localized output, preserve current locale for everything else.
1094 1094 lc_all = env.get(b'LC_ALL')
1095 1095 if lc_all:
1096 1096 env[b'LANG'] = lc_all
1097 1097 del env[b'LC_ALL']
1098 1098 env[b'LC_MESSAGES'] = b'C'
1099 1099 p = subprocess.Popen(
1100 1100 pycompat.rapply(procutil.tonativestr, cmd),
1101 1101 bufsize=-1,
1102 1102 close_fds=procutil.closefds,
1103 1103 stdout=subprocess.PIPE,
1104 1104 stderr=subprocess.PIPE,
1105 1105 env=procutil.tonativeenv(env),
1106 1106 **extrakw
1107 1107 )
1108 1108 stdout, stderr = map(util.fromnativeeol, p.communicate())
1109 1109 stderr = stderr.strip()
1110 1110 if not failok:
1111 1111 if p.returncode:
1112 1112 raise error.Abort(
1113 1113 stderr or b'exited with code %d' % p.returncode
1114 1114 )
1115 1115 if stderr:
1116 1116 self.ui.warn(stderr + b'\n')
1117 1117 return stdout, stderr
1118 1118
1119 1119 @propertycache
1120 1120 def _svnversion(self):
1121 1121 output, err = self._svncommand(
1122 1122 [b'--version', b'--quiet'], filename=None
1123 1123 )
1124 1124 m = re.search(br'^(\d+)\.(\d+)', output)
1125 1125 if not m:
1126 1126 raise error.Abort(_(b'cannot retrieve svn tool version'))
1127 1127 return (int(m.group(1)), int(m.group(2)))
1128 1128
1129 1129 def _svnmissing(self):
1130 1130 return not self.wvfs.exists(b'.svn')
1131 1131
1132 1132 def _wcrevs(self):
1133 1133 # Get the working directory revision as well as the last
1134 1134 # commit revision so we can compare the subrepo state with
1135 1135 # both. We used to store the working directory one.
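        # Hedged illustration: `svn info --xml` reports something like
        #   <entry revision="42"><commit revision="40"/></entry>
        # so rev is the working-copy revision (42) and lastrev the last
        # committed one (40).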
1136 1136 output, err = self._svncommand([b'info', b'--xml'])
1137 1137 doc = xml.dom.minidom.parseString(output)
1138 1138 entries = doc.getElementsByTagName('entry')
1139 1139 lastrev, rev = b'0', b'0'
1140 1140 if entries:
1141 1141 rev = pycompat.bytestr(entries[0].getAttribute('revision')) or b'0'
1142 1142 commits = entries[0].getElementsByTagName('commit')
1143 1143 if commits:
1144 1144 lastrev = (
1145 1145 pycompat.bytestr(commits[0].getAttribute('revision'))
1146 1146 or b'0'
1147 1147 )
1148 1148 return (lastrev, rev)
1149 1149
1150 1150 def _wcrev(self):
1151 1151 return self._wcrevs()[0]
1152 1152
1153 1153 def _wcchanged(self):
1154 1154 """Return (changes, extchanges, missing) where changes is True
1155 1155 if the working directory was changed, extchanges is
1156 1156 True if any of these changes concern an external entry and missing
1157 1157 is True if any change is a missing entry.
1158 1158 """
1159 1159 output, err = self._svncommand([b'status', b'--xml'])
1160 1160 externals, changes, missing = [], [], []
1161 1161 doc = xml.dom.minidom.parseString(output)
1162 1162 for e in doc.getElementsByTagName('entry'):
1163 1163 s = e.getElementsByTagName('wc-status')
1164 1164 if not s:
1165 1165 continue
1166 1166 item = s[0].getAttribute('item')
1167 1167 props = s[0].getAttribute('props')
1168 1168 path = e.getAttribute('path').encode('utf8')
1169 1169 if item == 'external':
1170 1170 externals.append(path)
1171 1171 elif item == 'missing':
1172 1172 missing.append(path)
1173 1173 if item not in (
1174 1174 '',
1175 1175 'normal',
1176 1176 'unversioned',
1177 1177 'external',
1178 1178 ) or props not in ('', 'none', 'normal'):
1179 1179 changes.append(path)
1180 1180 for path in changes:
1181 1181 for ext in externals:
1182 1182 if path == ext or path.startswith(ext + pycompat.ossep):
1183 1183 return True, True, bool(missing)
1184 1184 return bool(changes), False, bool(missing)
1185 1185
1186 1186 @annotatesubrepoerror
1187 1187 def dirty(self, ignoreupdate=False, missing=False):
1188 1188 if self._svnmissing():
1189 1189 return self._state[1] != b''
1190 1190 wcchanged = self._wcchanged()
1191 1191 changed = wcchanged[0] or (missing and wcchanged[2])
1192 1192 if not changed:
1193 1193 if self._state[1] in self._wcrevs() or ignoreupdate:
1194 1194 return False
1195 1195 return True
1196 1196
1197 1197 def basestate(self):
1198 1198 lastrev, rev = self._wcrevs()
1199 1199 if lastrev != rev:
1200 1200             # Last committed rev is not the same as rev. We would
1201 1201             # like to take lastrev but we do not know if the subrepo
1202 1202             # URL exists at lastrev. Test it and fall back to rev if
1203 1203             # it is not there.
1204 1204 try:
1205 1205 self._svncommand(
1206 1206 [b'list', b'%s@%s' % (self._state[0], lastrev)]
1207 1207 )
1208 1208 return lastrev
1209 1209 except error.Abort:
1210 1210 pass
1211 1211 return rev
1212 1212
1213 1213 @annotatesubrepoerror
1214 1214 def commit(self, text, user, date):
1215 1215 # user and date are out of our hands since svn is centralized
1216 1216 changed, extchanged, missing = self._wcchanged()
1217 1217 if not changed:
1218 1218 return self.basestate()
1219 1219 if extchanged:
1220 1220 # Do not try to commit externals
1221 1221 raise error.Abort(_(b'cannot commit svn externals'))
1222 1222 if missing:
1223 1223 # svn can commit with missing entries but aborting like hg
1224 1224 # seems a better approach.
1225 1225 raise error.Abort(_(b'cannot commit missing svn entries'))
1226 1226 commitinfo, err = self._svncommand([b'commit', b'-m', text])
1227 1227 self.ui.status(commitinfo)
1228 1228 newrev = re.search(b'Committed revision ([0-9]+).', commitinfo)
1229 1229 if not newrev:
1230 1230 if not commitinfo.strip():
1231 1231 # Sometimes, our definition of "changed" differs from
1232 1232                 # svn's. For instance, svn ignores missing files
1233 1233 # when committing. If there are only missing files, no
1234 1234 # commit is made, no output and no error code.
1235 1235 raise error.Abort(_(b'failed to commit svn changes'))
1236 1236 raise error.Abort(commitinfo.splitlines()[-1])
1237 1237 newrev = newrev.groups()[0]
1238 1238 self.ui.status(self._svncommand([b'update', b'-r', newrev])[0])
1239 1239 return newrev
1240 1240
1241 1241 @annotatesubrepoerror
1242 1242 def remove(self):
1243 1243 if self.dirty():
1244 1244 self.ui.warn(
1245 1245 _(b'not removing repo %s because it has changes.\n')
1246 1246 % self._path
1247 1247 )
1248 1248 return
1249 1249 self.ui.note(_(b'removing subrepo %s\n') % self._path)
1250 1250
1251 1251 self.wvfs.rmtree(forcibly=True)
1252 1252 try:
1253 1253 pwvfs = self._ctx.repo().wvfs
1254 1254 pwvfs.removedirs(pwvfs.dirname(self._path))
1255 1255 except OSError:
1256 1256 pass
1257 1257
1258 1258 @annotatesubrepoerror
1259 1259 def get(self, state, overwrite=False):
1260 1260 if overwrite:
1261 1261 self._svncommand([b'revert', b'--recursive'])
1262 1262 args = [b'checkout']
1263 1263 if self._svnversion >= (1, 5):
1264 1264 args.append(b'--force')
1265 1265 # The revision must be specified at the end of the URL to properly
1266 1266 # update to a directory which has since been deleted and recreated.
1267 1267 args.append(b'%s@%s' % (state[0], state[1]))
1268 1268
1269 1269 # SEC: check that the ssh url is safe
1270 1270 util.checksafessh(state[0])
1271 1271
1272 1272 status, err = self._svncommand(args, failok=True)
1273 1273 _sanitize(self.ui, self.wvfs, b'.svn')
1274 1274 if not re.search(b'Checked out revision [0-9]+.', status):
1275 1275 if b'is already a working copy for a different URL' in err and (
1276 1276 self._wcchanged()[:2] == (False, False)
1277 1277 ):
1278 1278 # obstructed but clean working copy, so just blow it away.
1279 1279 self.remove()
1280 1280 self.get(state, overwrite=False)
1281 1281 return
1282 1282 raise error.Abort((status or err).splitlines()[-1])
1283 1283 self.ui.status(status)
1284 1284
1285 1285 @annotatesubrepoerror
1286 1286 def merge(self, state):
1287 1287 old = self._state[1]
1288 1288 new = state[1]
1289 1289 wcrev = self._wcrev()
1290 1290 if new != wcrev:
1291 1291 dirty = old == wcrev or self._wcchanged()[0]
1292 1292 if _updateprompt(self.ui, self, dirty, wcrev, new):
1293 1293 self.get(state, False)
1294 1294
1295 1295 def push(self, opts):
1296 1296 # push is a no-op for SVN
1297 1297 return True
1298 1298
1299 1299 @annotatesubrepoerror
1300 1300 def files(self):
1301 1301 output = self._svncommand([b'list', b'--recursive', b'--xml'])[0]
1302 1302 doc = xml.dom.minidom.parseString(output)
1303 1303 paths = []
1304 1304 for e in doc.getElementsByTagName('entry'):
1305 1305 kind = pycompat.bytestr(e.getAttribute('kind'))
1306 1306 if kind != b'file':
1307 1307 continue
1308 1308 name = ''.join(
1309 1309 c.data
1310 1310 for c in e.getElementsByTagName('name')[0].childNodes
1311 1311 if c.nodeType == c.TEXT_NODE
1312 1312 )
1313 1313 paths.append(name.encode('utf8'))
1314 1314 return paths
1315 1315
1316 1316 def filedata(self, name, decode):
1317 1317 return self._svncommand([b'cat'], name)[0]
1318 1318
1319 1319
1320 1320 class gitsubrepo(abstractsubrepo):
1321 1321 def __init__(self, ctx, path, state, allowcreate):
1322 1322 super(gitsubrepo, self).__init__(ctx, path)
1323 1323 self._state = state
1324 1324 self._abspath = ctx.repo().wjoin(path)
1325 1325 self._subparent = ctx.repo()
1326 1326 self._ensuregit()
1327 1327
1328 1328 def _ensuregit(self):
1329 1329 try:
1330 1330 self._gitexecutable = b'git'
1331 1331 out, err = self._gitnodir([b'--version'])
1332 1332 except OSError as e:
1333 1333 genericerror = _(b"error executing git for subrepo '%s': %s")
1334 1334 notfoundhint = _(b"check git is installed and in your PATH")
1335 1335 if e.errno != errno.ENOENT:
1336 1336 raise error.Abort(
1337 1337 genericerror % (self._path, encoding.strtolocal(e.strerror))
1338 1338 )
1339 1339 elif pycompat.iswindows:
1340 1340 try:
1341 1341 self._gitexecutable = b'git.cmd'
1342 1342 out, err = self._gitnodir([b'--version'])
1343 1343 except OSError as e2:
1344 1344 if e2.errno == errno.ENOENT:
1345 1345 raise error.Abort(
1346 1346 _(
1347 1347 b"couldn't find 'git' or 'git.cmd'"
1348 1348 b" for subrepo '%s'"
1349 1349 )
1350 1350 % self._path,
1351 1351 hint=notfoundhint,
1352 1352 )
1353 1353 else:
1354 1354 raise error.Abort(
1355 1355 genericerror
1356 1356 % (self._path, encoding.strtolocal(e2.strerror))
1357 1357 )
1358 1358 else:
1359 1359 raise error.Abort(
1360 1360 _(b"couldn't find git for subrepo '%s'") % self._path,
1361 1361 hint=notfoundhint,
1362 1362 )
1363 1363 versionstatus = self._checkversion(out)
1364 1364 if versionstatus == b'unknown':
1365 1365 self.ui.warn(_(b'cannot retrieve git version\n'))
1366 1366 elif versionstatus == b'abort':
1367 1367 raise error.Abort(
1368 1368 _(b'git subrepo requires at least 1.6.0 or later')
1369 1369 )
1370 1370 elif versionstatus == b'warning':
1371 1371 self.ui.warn(_(b'git subrepo requires at least 1.6.0 or later\n'))
1372 1372
1373 1373 @staticmethod
1374 1374 def _gitversion(out):
1375 1375 m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out)
1376 1376 if m:
1377 1377 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1378 1378
1379 1379 m = re.search(br'^git version (\d+)\.(\d+)', out)
1380 1380 if m:
1381 1381 return (int(m.group(1)), int(m.group(2)), 0)
1382 1382
1383 1383 return -1
1384 1384
1385 1385 @staticmethod
1386 1386 def _checkversion(out):
1387 1387 '''ensure git version is new enough
1388 1388
1389 1389 >>> _checkversion = gitsubrepo._checkversion
1390 1390 >>> _checkversion(b'git version 1.6.0')
1391 1391 'ok'
1392 1392 >>> _checkversion(b'git version 1.8.5')
1393 1393 'ok'
1394 1394 >>> _checkversion(b'git version 1.4.0')
1395 1395 'abort'
1396 1396 >>> _checkversion(b'git version 1.5.0')
1397 1397 'warning'
1398 1398 >>> _checkversion(b'git version 1.9-rc0')
1399 1399 'ok'
1400 1400 >>> _checkversion(b'git version 1.9.0.265.g81cdec2')
1401 1401 'ok'
1402 1402 >>> _checkversion(b'git version 1.9.0.GIT')
1403 1403 'ok'
1404 1404 >>> _checkversion(b'git version 12345')
1405 1405 'unknown'
1406 1406 >>> _checkversion(b'no')
1407 1407 'unknown'
1408 1408 '''
1409 1409 version = gitsubrepo._gitversion(out)
1410 1410 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1411 1411 # despite the docstring comment. For now, error on 1.4.0, warn on
1412 1412 # 1.5.0 but attempt to continue.
1413 1413 if version == -1:
1414 1414 return b'unknown'
1415 1415 if version < (1, 5, 0):
1416 1416 return b'abort'
1417 1417 elif version < (1, 6, 0):
1418 1418 return b'warning'
1419 1419 return b'ok'
1420 1420
1421 1421 def _gitcommand(self, commands, env=None, stream=False):
1422 1422 return self._gitdir(commands, env=env, stream=stream)[0]
1423 1423
1424 1424 def _gitdir(self, commands, env=None, stream=False):
1425 1425 return self._gitnodir(
1426 1426 commands, env=env, stream=stream, cwd=self._abspath
1427 1427 )
1428 1428
1429 1429 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1430 1430 """Calls the git command
1431 1431
1432 1432         This method tries to call the git command. Versions prior to 1.6.0
1433 1433         are not supported and will very probably fail.
1434 1434 """
1435 1435 self.ui.debug(b'%s: git %s\n' % (self._relpath, b' '.join(commands)))
1436 1436 if env is None:
1437 1437 env = encoding.environ.copy()
1438 1438 # disable localization for Git output (issue5176)
1439 1439 env[b'LC_ALL'] = b'C'
1440 1440 # fix for Git CVE-2015-7545
1441 1441 if b'GIT_ALLOW_PROTOCOL' not in env:
1442 1442 env[b'GIT_ALLOW_PROTOCOL'] = b'file:git:http:https:ssh'
1443 1443 # unless ui.quiet is set, print git's stderr,
1444 1444 # which is mostly progress and useful info
1445 1445 errpipe = None
1446 1446 if self.ui.quiet:
1447 1447 errpipe = pycompat.open(os.devnull, b'w')
1448 1448 if self.ui._colormode and len(commands) and commands[0] == b"diff":
1449 1449             # insert the argument at the front, since
1450 1450             # the end of the git diff arguments is reserved for paths
1451 1451 commands.insert(1, b'--color')
1452 1452 p = subprocess.Popen(
1453 1453 pycompat.rapply(
1454 1454 procutil.tonativestr, [self._gitexecutable] + commands
1455 1455 ),
1456 1456 bufsize=-1,
1457 1457 cwd=pycompat.rapply(procutil.tonativestr, cwd),
1458 1458 env=procutil.tonativeenv(env),
1459 1459 close_fds=procutil.closefds,
1460 1460 stdout=subprocess.PIPE,
1461 1461 stderr=errpipe,
1462 1462 )
1463 1463 if stream:
1464 1464 return p.stdout, None
1465 1465
1466 1466 retdata = p.stdout.read().strip()
1467 1467 # wait for the child to exit to avoid race condition.
1468 1468 p.wait()
1469 1469
1470 1470 if p.returncode != 0 and p.returncode != 1:
1471 1471 # there are certain error codes that are ok
1472 1472 command = commands[0]
1473 1473 if command in (b'cat-file', b'symbolic-ref'):
1474 1474 return retdata, p.returncode
1475 1475 # for all others, abort
1476 1476 raise error.Abort(
1477 1477 _(b'git %s error %d in %s')
1478 1478 % (command, p.returncode, self._relpath)
1479 1479 )
1480 1480
1481 1481 return retdata, p.returncode
1482 1482
1483 1483 def _gitmissing(self):
1484 1484 return not self.wvfs.exists(b'.git')
1485 1485
1486 1486 def _gitstate(self):
1487 1487 return self._gitcommand([b'rev-parse', b'HEAD'])
1488 1488
1489 1489 def _gitcurrentbranch(self):
1490 1490 current, err = self._gitdir([b'symbolic-ref', b'HEAD', b'--quiet'])
1491 1491 if err:
1492 1492 current = None
1493 1493 return current
1494 1494
1495 1495 def _gitremote(self, remote):
1496 1496 out = self._gitcommand([b'remote', b'show', b'-n', remote])
1497 1497 line = out.split(b'\n')[1]
1498 1498 i = line.index(b'URL: ') + len(b'URL: ')
1499 1499 return line[i:]
1500 1500
1501 1501 def _githavelocally(self, revision):
1502 1502 out, code = self._gitdir([b'cat-file', b'-e', revision])
1503 1503 return code == 0
1504 1504
1505 1505 def _gitisancestor(self, r1, r2):
1506 1506 base = self._gitcommand([b'merge-base', r1, r2])
1507 1507 return base == r1
1508 1508
1509 1509 def _gitisbare(self):
1510 1510 return self._gitcommand([b'config', b'--bool', b'core.bare']) == b'true'
1511 1511
1512 1512 def _gitupdatestat(self):
1513 1513 """This must be run before git diff-index.
1514 1514 diff-index only looks at changes to file stat;
1515 1515 this command looks at file contents and updates the stat."""
1516 1516 self._gitcommand([b'update-index', b'-q', b'--refresh'])
1517 1517
1518 1518 def _gitbranchmap(self):
1519 1519 '''returns 2 things:
1520 1520 a map from git branch to revision
1521 1521 a map from revision to branches'''
1522 1522 branch2rev = {}
1523 1523 rev2branch = {}
1524 1524
1525 1525 out = self._gitcommand(
1526 1526 [b'for-each-ref', b'--format', b'%(objectname) %(refname)']
1527 1527 )
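        # Hedged illustration of the for-each-ref output parsed below (the
        # sha is made up):
        #   b'9f3a1b... refs/heads/master'
        #   b'9f3a1b... refs/remotes/origin/master'
        # producing branch2rev[ref] = sha and rev2branch[sha] = [ref, ...]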
1528 1528 for line in out.split(b'\n'):
1529 1529 revision, ref = line.split(b' ')
1530 1530 if not ref.startswith(b'refs/heads/') and not ref.startswith(
1531 1531 b'refs/remotes/'
1532 1532 ):
1533 1533 continue
1534 1534 if ref.startswith(b'refs/remotes/') and ref.endswith(b'/HEAD'):
1535 1535 continue # ignore remote/HEAD redirects
1536 1536 branch2rev[ref] = revision
1537 1537 rev2branch.setdefault(revision, []).append(ref)
1538 1538 return branch2rev, rev2branch
1539 1539
1540 1540 def _gittracking(self, branches):
1541 1541 """return map of remote branch to local tracking branch"""
1542 1542 # assumes no more than one local tracking branch for each remote
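        # Hedged example: a local b'refs/heads/master' whose git config has
        # branch.master.remote = origin and
        # branch.master.merge = refs/heads/master produces
        #   tracking[b'refs/remotes/origin/master'] = b'refs/heads/master'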
1543 1543 tracking = {}
1544 1544 for b in branches:
1545 1545 if b.startswith(b'refs/remotes/'):
1546 1546 continue
1547 1547 bname = b.split(b'/', 2)[2]
1548 1548 remote = self._gitcommand([b'config', b'branch.%s.remote' % bname])
1549 1549 if remote:
1550 1550 ref = self._gitcommand([b'config', b'branch.%s.merge' % bname])
1551 1551 tracking[
1552 1552 b'refs/remotes/%s/%s' % (remote, ref.split(b'/', 2)[2])
1553 1553 ] = b
1554 1554 return tracking
1555 1555
1556 1556 def _abssource(self, source):
1557 1557 if b'://' not in source:
1558 1558 # recognize the scp syntax as an absolute source
1559 1559 colon = source.find(b':')
1560 1560 if colon != -1 and b'/' not in source[:colon]:
1561 1561 return source
1562 1562 self._subsource = source
1563 1563 return _abssource(self)
1564 1564
1565 1565 def _fetch(self, source, revision):
1566 1566 if self._gitmissing():
1567 1567 # SEC: check for safe ssh url
1568 1568 util.checksafessh(source)
1569 1569
1570 1570 source = self._abssource(source)
1571 1571 self.ui.status(
1572 1572 _(b'cloning subrepo %s from %s\n') % (self._relpath, source)
1573 1573 )
1574 1574 self._gitnodir([b'clone', source, self._abspath])
1575 1575 if self._githavelocally(revision):
1576 1576 return
1577 1577 self.ui.status(
1578 1578 _(b'pulling subrepo %s from %s\n')
1579 1579 % (self._relpath, self._gitremote(b'origin'))
1580 1580 )
1581 1581 # try only origin: the originally cloned repo
1582 1582 self._gitcommand([b'fetch'])
1583 1583 if not self._githavelocally(revision):
1584 1584 raise error.Abort(
1585 1585 _(b'revision %s does not exist in subrepository "%s"\n')
1586 1586 % (revision, self._relpath)
1587 1587 )
1588 1588
1589 1589 @annotatesubrepoerror
1590 1590 def dirty(self, ignoreupdate=False, missing=False):
1591 1591 if self._gitmissing():
1592 1592 return self._state[1] != b''
1593 1593 if self._gitisbare():
1594 1594 return True
1595 1595 if not ignoreupdate and self._state[1] != self._gitstate():
1596 1596 # different version checked out
1597 1597 return True
1598 1598 # check for staged changes or modified files; ignore untracked files
1599 1599 self._gitupdatestat()
1600 1600 out, code = self._gitdir([b'diff-index', b'--quiet', b'HEAD'])
1601 1601 return code == 1
1602 1602
1603 1603 def basestate(self):
1604 1604 return self._gitstate()
1605 1605
1606 1606 @annotatesubrepoerror
1607 1607 def get(self, state, overwrite=False):
1608 1608 source, revision, kind = state
1609 1609 if not revision:
1610 1610 self.remove()
1611 1611 return
1612 1612 self._fetch(source, revision)
1613 1613 # if the repo was set to be bare, unbare it
1614 1614 if self._gitisbare():
1615 1615 self._gitcommand([b'config', b'core.bare', b'false'])
1616 1616 if self._gitstate() == revision:
1617 1617 self._gitcommand([b'reset', b'--hard', b'HEAD'])
1618 1618 return
1619 1619 elif self._gitstate() == revision:
1620 1620 if overwrite:
1621 1621 # first reset the index to unmark new files for commit, because
1622 1622 # reset --hard will otherwise throw away files added for commit,
1623 1623 # not just unmark them.
1624 1624 self._gitcommand([b'reset', b'HEAD'])
1625 1625 self._gitcommand([b'reset', b'--hard', b'HEAD'])
1626 1626 return
1627 1627 branch2rev, rev2branch = self._gitbranchmap()
1628 1628
1629 1629 def checkout(args):
1630 1630 cmd = [b'checkout']
1631 1631 if overwrite:
1632 1632 # first reset the index to unmark new files for commit, because
1633 1633 # the -f option will otherwise throw away files added for
1634 1634 # commit, not just unmark them.
1635 1635 self._gitcommand([b'reset', b'HEAD'])
1636 1636 cmd.append(b'-f')
1637 1637 self._gitcommand(cmd + args)
1638 1638 _sanitize(self.ui, self.wvfs, b'.git')
1639 1639
1640 1640 def rawcheckout():
1641 1641             # no branch to check out, so check the revision out detached
1642 1642 self.ui.warn(
1643 1643 _(b'checking out detached HEAD in subrepository "%s"\n')
1644 1644 % self._relpath
1645 1645 )
1646 1646 self.ui.warn(
1647 1647 _(b'check out a git branch if you intend to make changes\n')
1648 1648 )
1649 1649 checkout([b'-q', revision])
1650 1650
1651 1651 if revision not in rev2branch:
1652 1652 rawcheckout()
1653 1653 return
1654 1654 branches = rev2branch[revision]
1655 1655 firstlocalbranch = None
1656 1656 for b in branches:
1657 1657 if b == b'refs/heads/master':
1658 1658 # master trumps all other branches
1659 1659 checkout([b'refs/heads/master'])
1660 1660 return
1661 1661 if not firstlocalbranch and not b.startswith(b'refs/remotes/'):
1662 1662 firstlocalbranch = b
1663 1663 if firstlocalbranch:
1664 1664 checkout([firstlocalbranch])
1665 1665 return
1666 1666
1667 1667 tracking = self._gittracking(branch2rev.keys())
1668 1668 # choose a remote branch already tracked if possible
1669 1669 remote = branches[0]
1670 1670 if remote not in tracking:
1671 1671 for b in branches:
1672 1672 if b in tracking:
1673 1673 remote = b
1674 1674 break
1675 1675
1676 1676 if remote not in tracking:
1677 1677 # create a new local tracking branch
1678 1678 local = remote.split(b'/', 3)[3]
1679 1679 checkout([b'-b', local, remote])
1680 1680 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1681 1681 # When updating to a tracked remote branch,
1682 1682 # if the local tracking branch is downstream of it,
1683 1683 # a normal `git pull` would have performed a "fast-forward merge"
1684 1684 # which is equivalent to updating the local branch to the remote.
1685 1685 # Since we are only looking at branching at update, we need to
1686 1686 # detect this situation and perform this action lazily.
1687 1687 if tracking[remote] != self._gitcurrentbranch():
1688 1688 checkout([tracking[remote]])
1689 1689 self._gitcommand([b'merge', b'--ff', remote])
1690 1690 _sanitize(self.ui, self.wvfs, b'.git')
1691 1691 else:
1692 1692 # a real merge would be required, just checkout the revision
1693 1693 rawcheckout()
1694 1694
1695 1695 @annotatesubrepoerror
1696 1696 def commit(self, text, user, date):
1697 1697 if self._gitmissing():
1698 1698 raise error.Abort(_(b"subrepo %s is missing") % self._relpath)
1699 1699 cmd = [b'commit', b'-a', b'-m', text]
1700 1700 env = encoding.environ.copy()
1701 1701 if user:
1702 1702 cmd += [b'--author', user]
1703 1703 if date:
1704 1704             # git's date parser silently ignores dates where seconds < 1e9,
1705 1705             # so convert to ISO8601
1706 1706 env[b'GIT_AUTHOR_DATE'] = dateutil.datestr(
1707 1707 date, b'%Y-%m-%dT%H:%M:%S %1%2'
1708 1708 )
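            # hedged example: dateutil.datestr((1500000000, 0), that format)
            # renders roughly as b'2017-07-14T02:40:00 +0000'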
1709 1709 self._gitcommand(cmd, env=env)
1710 1710         # make sure the commit worked; otherwise HEAD might not exist under
1711 1711         # certain circumstances
1712 1712 return self._gitstate()
1713 1713
1714 1714 @annotatesubrepoerror
1715 1715 def merge(self, state):
1716 1716 source, revision, kind = state
1717 1717 self._fetch(source, revision)
1718 1718 base = self._gitcommand([b'merge-base', revision, self._state[1]])
1719 1719 self._gitupdatestat()
1720 1720 out, code = self._gitdir([b'diff-index', b'--quiet', b'HEAD'])
1721 1721
1722 1722 def mergefunc():
1723 1723 if base == revision:
1724 1724 self.get(state) # fast forward merge
1725 1725 elif base != self._state[1]:
1726 1726 self._gitcommand([b'merge', b'--no-commit', revision])
1727 1727 _sanitize(self.ui, self.wvfs, b'.git')
1728 1728
1729 1729 if self.dirty():
1730 1730 if self._gitstate() != revision:
1731 1731 dirty = self._gitstate() == self._state[1] or code != 0
1732 1732 if _updateprompt(
1733 1733 self.ui, self, dirty, self._state[1][:7], revision[:7]
1734 1734 ):
1735 1735 mergefunc()
1736 1736 else:
1737 1737 mergefunc()
1738 1738
1739 1739 @annotatesubrepoerror
1740 1740 def push(self, opts):
1741 1741 force = opts.get(b'force')
1742 1742
1743 1743 if not self._state[1]:
1744 1744 return True
1745 1745 if self._gitmissing():
1746 1746 raise error.Abort(_(b"subrepo %s is missing") % self._relpath)
1747 1747 # if a branch in origin contains the revision, nothing to do
1748 1748 branch2rev, rev2branch = self._gitbranchmap()
1749 1749 if self._state[1] in rev2branch:
1750 1750 for b in rev2branch[self._state[1]]:
1751 1751 if b.startswith(b'refs/remotes/origin/'):
1752 1752 return True
1753 1753 for b, revision in pycompat.iteritems(branch2rev):
1754 1754 if b.startswith(b'refs/remotes/origin/'):
1755 1755 if self._gitisancestor(self._state[1], revision):
1756 1756 return True
1757 1757 # otherwise, try to push the currently checked out branch
1758 1758 cmd = [b'push']
1759 1759 if force:
1760 1760 cmd.append(b'--force')
1761 1761
1762 1762 current = self._gitcurrentbranch()
1763 1763 if current:
1764 1764 # determine if the current branch is even useful
1765 1765 if not self._gitisancestor(self._state[1], current):
1766 1766 self.ui.warn(
1767 1767 _(
1768 1768 b'unrelated git branch checked out '
1769 1769 b'in subrepository "%s"\n'
1770 1770 )
1771 1771 % self._relpath
1772 1772 )
1773 1773 return False
1774 1774 self.ui.status(
1775 1775 _(b'pushing branch %s of subrepository "%s"\n')
1776 1776 % (current.split(b'/', 2)[2], self._relpath)
1777 1777 )
1778 1778 ret = self._gitdir(cmd + [b'origin', current])
1779 1779 return ret[1] == 0
1780 1780 else:
1781 1781 self.ui.warn(
1782 1782 _(
1783 1783 b'no branch checked out in subrepository "%s"\n'
1784 1784 b'cannot push revision %s\n'
1785 1785 )
1786 1786 % (self._relpath, self._state[1])
1787 1787 )
1788 1788 return False
1789 1789
1790 1790 @annotatesubrepoerror
1791 1791 def add(self, ui, match, prefix, uipathfn, explicitonly, **opts):
1792 1792 if self._gitmissing():
1793 1793 return []
1794 1794
1795 1795 s = self.status(None, unknown=True, clean=True)
1796 1796
1797 1797 tracked = set()
1798 1798         # dirstate states 'a', 'm' and 'n' warn; 'r' is added again
1799 1799 for l in (s.modified, s.added, s.deleted, s.clean):
1800 1800 tracked.update(l)
1801 1801
1802 1802 # Unknown files not of interest will be rejected by the matcher
1803 1803 files = s.unknown
1804 1804 files.extend(match.files())
1805 1805
1806 1806 rejected = []
1807 1807
1808 1808 files = [f for f in sorted(set(files)) if match(f)]
1809 1809 for f in files:
1810 1810 exact = match.exact(f)
1811 1811 command = [b"add"]
1812 1812 if exact:
1813 1813 command.append(b"-f") # should be added, even if ignored
1814 1814 if ui.verbose or not exact:
1815 1815 ui.status(_(b'adding %s\n') % uipathfn(f))
1816 1816
1817 1817 if f in tracked: # hg prints 'adding' even if already tracked
1818 1818 if exact:
1819 1819 rejected.append(f)
1820 1820 continue
1821 1821 if not opts.get('dry_run'):
1822 1822 self._gitcommand(command + [f])
1823 1823
1824 1824 for f in rejected:
1825 1825 ui.warn(_(b"%s already tracked!\n") % uipathfn(f))
1826 1826
1827 1827 return rejected
1828 1828
1829 1829 @annotatesubrepoerror
1830 1830 def remove(self):
1831 1831 if self._gitmissing():
1832 1832 return
1833 1833 if self.dirty():
1834 1834 self.ui.warn(
1835 1835 _(b'not removing repo %s because it has changes.\n')
1836 1836 % self._relpath
1837 1837 )
1838 1838 return
1839 1839 # we can't fully delete the repository as it may contain
1840 1840 # local-only history
1841 1841 self.ui.note(_(b'removing subrepo %s\n') % self._relpath)
1842 1842 self._gitcommand([b'config', b'core.bare', b'true'])
1843 1843 for f, kind in self.wvfs.readdir():
1844 1844 if f == b'.git':
1845 1845 continue
1846 1846 if kind == stat.S_IFDIR:
1847 1847 self.wvfs.rmtree(f)
1848 1848 else:
1849 1849 self.wvfs.unlink(f)
1850 1850
1851 1851 def archive(self, archiver, prefix, match=None, decode=True):
1852 1852 total = 0
1853 1853 source, revision = self._state
1854 1854 if not revision:
1855 1855 return total
1856 1856 self._fetch(source, revision)
1857 1857
1858 1858 # Parse git's native archive command.
1859 1859 # This should be much faster than manually traversing the trees
1860 1860 # and objects with many subprocess calls.
1861 1861 tarstream = self._gitcommand([b'archive', revision], stream=True)
1862 1862 tar = tarfile.open(fileobj=tarstream, mode='r|')
1863 1863 relpath = subrelpath(self)
1864 1864 progress = self.ui.makeprogress(
1865 1865 _(b'archiving (%s)') % relpath, unit=_(b'files')
1866 1866 )
1867 1867 progress.update(0)
1868 1868 for info in tar:
1869 1869 if info.isdir():
1870 1870 continue
1871 1871 bname = pycompat.fsencode(info.name)
1872 1872 if match and not match(bname):
1873 1873 continue
1874 1874 if info.issym():
1875 1875 data = info.linkname
1876 1876 else:
1877 1877 data = tar.extractfile(info).read()
1878 1878 archiver.addfile(prefix + bname, info.mode, info.issym(), data)
1879 1879 total += 1
1880 1880 progress.increment()
1881 1881 progress.complete()
1882 1882 return total
1883 1883
1884 1884 @annotatesubrepoerror
1885 1885 def cat(self, match, fm, fntemplate, prefix, **opts):
1886 1886 rev = self._state[1]
1887 1887 if match.anypats():
1888 1888 return 1 # No support for include/exclude yet
1889 1889
1890 1890 if not match.files():
1891 1891 return 1
1892 1892
1893 1893 # TODO: add support for non-plain formatter (see cmdutil.cat())
1894 1894 for f in match.files():
1895 1895 output = self._gitcommand([b"show", b"%s:%s" % (rev, f)])
1896 1896 fp = cmdutil.makefileobj(
1897 1897 self._ctx, fntemplate, pathname=self.wvfs.reljoin(prefix, f)
1898 1898 )
1899 1899 fp.write(output)
1900 1900 fp.close()
1901 1901 return 0
1902 1902
1903 1903 @annotatesubrepoerror
1904 1904 def status(self, rev2, **opts):
1905 1905 rev1 = self._state[1]
1906 1906 if self._gitmissing() or not rev1:
1907 1907 # if the repo is missing, return no results
1908 1908 return scmutil.status([], [], [], [], [], [], [])
1909 1909 modified, added, removed = [], [], []
1910 1910 self._gitupdatestat()
1911 1911 if rev2:
1912 1912 command = [b'diff-tree', b'--no-renames', b'-r', rev1, rev2]
1913 1913 else:
1914 1914 command = [b'diff-index', b'--no-renames', rev1]
1915 1915 out = self._gitcommand(command)
1916 1916 for line in out.split(b'\n'):
1917 1917 tab = line.find(b'\t')
1918 1918 if tab == -1:
1919 1919 continue
1920 1920 status, f = line[tab - 1 : tab], line[tab + 1 :]
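            # hedged illustration of a raw diff-index record:
            #   b':100644 100644 <sha> <sha> M\tsubdir/file.c'
            # the status letter sits just before the tab, the path after it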
1921 1921 if status == b'M':
1922 1922 modified.append(f)
1923 1923 elif status == b'A':
1924 1924 added.append(f)
1925 1925 elif status == b'D':
1926 1926 removed.append(f)
1927 1927
1928 1928 deleted, unknown, ignored, clean = [], [], [], []
1929 1929
1930 1930 command = [b'status', b'--porcelain', b'-z']
1931 1931 if opts.get('unknown'):
1932 1932 command += [b'--untracked-files=all']
1933 1933 if opts.get('ignored'):
1934 1934 command += [b'--ignored']
1935 1935 out = self._gitcommand(command)
1936 1936
1937 1937 changedfiles = set()
1938 1938 changedfiles.update(modified)
1939 1939 changedfiles.update(added)
1940 1940 changedfiles.update(removed)
1941 1941 for line in out.split(b'\0'):
1942 1942 if not line:
1943 1943 continue
1944 1944 st = line[0:2]
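            # hedged illustration of porcelain -z records:
            #   b'?? scratch.txt' -> st == b'??' (unknown)
            #   b'!! build.log'   -> st == b'!!' (ignored, with --ignored)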
1945 1945 # moves and copies show 2 files on one line
1946 1946 if line.find(b'\0') >= 0:
1947 1947 filename1, filename2 = line[3:].split(b'\0')
1948 1948 else:
1949 1949 filename1 = line[3:]
1950 1950 filename2 = None
1951 1951
1952 1952 changedfiles.add(filename1)
1953 1953 if filename2:
1954 1954 changedfiles.add(filename2)
1955 1955
1956 1956 if st == b'??':
1957 1957 unknown.append(filename1)
1958 1958 elif st == b'!!':
1959 1959 ignored.append(filename1)
1960 1960
1961 1961 if opts.get('clean'):
1962 1962 out = self._gitcommand([b'ls-files'])
1963 1963 for f in out.split(b'\n'):
1964 1964                 if f not in changedfiles:
1965 1965 clean.append(f)
1966 1966
1967 1967 return scmutil.status(
1968 1968 modified, added, removed, deleted, unknown, ignored, clean
1969 1969 )
1970 1970
1971 1971 @annotatesubrepoerror
1972 1972 def diff(self, ui, diffopts, node2, match, prefix, **opts):
1973 1973 node1 = self._state[1]
1974 1974 cmd = [b'diff', b'--no-renames']
1975 1975 if opts['stat']:
1976 1976 cmd.append(b'--stat')
1977 1977 else:
1978 1978 # for Git, this also implies '-p'
1979 1979 cmd.append(b'-U%d' % diffopts.context)
1980 1980
1981 1981 if diffopts.noprefix:
1982 1982 cmd.extend(
1983 1983 [b'--src-prefix=%s/' % prefix, b'--dst-prefix=%s/' % prefix]
1984 1984 )
1985 1985 else:
1986 1986 cmd.extend(
1987 1987 [b'--src-prefix=a/%s/' % prefix, b'--dst-prefix=b/%s/' % prefix]
1988 1988 )
1989 1989
1990 1990 if diffopts.ignorews:
1991 1991 cmd.append(b'--ignore-all-space')
1992 1992 if diffopts.ignorewsamount:
1993 1993 cmd.append(b'--ignore-space-change')
1994 1994 if (
1995 1995 self._gitversion(self._gitcommand([b'--version'])) >= (1, 8, 4)
1996 1996 and diffopts.ignoreblanklines
1997 1997 ):
1998 1998 cmd.append(b'--ignore-blank-lines')
1999 1999
2000 2000 cmd.append(node1)
2001 2001 if node2:
2002 2002 cmd.append(node2)
2003 2003
2004 2004 output = b""
2005 2005 if match.always():
2006 2006 output += self._gitcommand(cmd) + b'\n'
2007 2007 else:
2008 2008 st = self.status(node2)
2009 2009 files = [
2010 2010 f
2011 2011 for sublist in (st.modified, st.added, st.removed)
2012 2012 for f in sublist
2013 2013 ]
2014 2014 for f in files:
2015 2015 if match(f):
2016 2016 output += self._gitcommand(cmd + [b'--', f]) + b'\n'
2017 2017
2018 2018 if output.strip():
2019 2019 ui.write(output)
2020 2020
2021 2021 @annotatesubrepoerror
2022 2022 def revert(self, substate, *pats, **opts):
2023 2023 self.ui.status(_(b'reverting subrepo %s\n') % substate[0])
2024 2024 if not opts.get('no_backup'):
2025 2025 status = self.status(None)
2026 2026 names = status.modified
2027 2027 for name in names:
2028 2028 # backuppath() expects a path relative to the parent repo (the
2029 2029 # repo that ui.origbackuppath is relative to)
2030 2030 parentname = os.path.join(self._path, name)
2031 2031 bakname = scmutil.backuppath(
2032 2032 self.ui, self._subparent, parentname
2033 2033 )
2034 2034 self.ui.note(
2035 2035 _(b'saving current version of %s as %s\n')
2036 2036 % (name, os.path.relpath(bakname))
2037 2037 )
2038 2038 util.rename(self.wvfs.join(name), bakname)
2039 2039
2040 2040 if not opts.get('dry_run'):
2041 2041 self.get(substate, overwrite=True)
2042 2042 return []
2043 2043
2044 2044 def shortid(self, revid):
2045 2045 return revid[:7]
2046 2046
2047 2047
2048 2048 types = {
2049 2049 b'hg': hgsubrepo,
2050 2050 b'svn': svnsubrepo,
2051 2051 b'git': gitsubrepo,
2052 2052 }
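
The status() method above relies on git's NUL-delimited porcelain format. As a rough, standalone sketch of the same parsing idea (simple two-letter status codes only; rename and copy entries, which the real code special-cases, are ignored here), not the code under review:

  def parse_porcelain_z(out):
      # 'git status --porcelain -z' separates entries with NUL bytes.
      # Each entry is a two-character status code, a space, then the
      # path, e.g. b'?? foo.txt' for an untracked file.
      unknown, ignored = [], []
      for entry in out.split(b'\0'):
          if not entry:
              continue
          st, path = entry[0:2], entry[3:]
          if st == b'??':
              unknown.append(path)
          elif st == b'!!':
              ignored.append(path)
      return unknown, ignored
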
@@ -1,1697 +1,1693 b''
1 1 A script that implements uppercasing of specific lines in a file. This
2 2 approximates the behavior of code formatters well enough for our tests.
3 3
4 4 $ UPPERCASEPY="$TESTTMP/uppercase.py"
5 5 $ cat > $UPPERCASEPY <<EOF
6 6 > import sys
7 7 > from mercurial.utils.procutil import setbinary
8 8 > setbinary(sys.stdin)
9 9 > setbinary(sys.stdout)
10 10 > lines = set()
11 11 > for arg in sys.argv[1:]:
12 12 > if arg == 'all':
13 13 > sys.stdout.write(sys.stdin.read().upper())
14 14 > sys.exit(0)
15 15 > else:
16 16 > first, last = arg.split('-')
17 17 > lines.update(range(int(first), int(last) + 1))
18 18 > for i, line in enumerate(sys.stdin.readlines()):
19 19 > if i + 1 in lines:
20 20 > sys.stdout.write(line.upper())
21 21 > else:
22 22 > sys.stdout.write(line)
23 23 > EOF
24 24 $ TESTLINES="foo\nbar\nbaz\nqux\n"
25 25 $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY
26 26 foo
27 27 bar
28 28 baz
29 29 qux
30 30 $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY all
31 31 FOO
32 32 BAR
33 33 BAZ
34 34 QUX
35 35 $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 1-1
36 36 FOO
37 37 bar
38 38 baz
39 39 qux
40 40 $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 1-2
41 41 FOO
42 42 BAR
43 43 baz
44 44 qux
45 45 $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 2-3
46 46 foo
47 47 BAR
48 48 BAZ
49 49 qux
50 50 $ printf $TESTLINES | "$PYTHON" $UPPERCASEPY 2-2 4-4
51 51 foo
52 52 BAR
53 53 baz
54 54 QUX
55 55
56 56 Set up the config with two simple fixers: one that fixes specific line ranges,
57 57 and one that always fixes the whole file. They both "fix" files by converting
58 58 letters to uppercase. They use different file extensions, so each test case can
59 59 choose which behavior to use by naming files.
60 60
61 61 $ cat >> $HGRCPATH <<EOF
62 62 > [extensions]
63 63 > fix =
64 64 > [experimental]
65 65 > evolution.createmarkers=True
66 66 > evolution.allowunstable=True
67 67 > [fix]
68 68 > uppercase-whole-file:command="$PYTHON" $UPPERCASEPY all
69 69 > uppercase-whole-file:pattern=set:**.whole
70 70 > uppercase-changed-lines:command="$PYTHON" $UPPERCASEPY
71 71 > uppercase-changed-lines:linerange={first}-{last}
72 72 > uppercase-changed-lines:pattern=set:**.changed
73 73 > EOF
74 74
75 75 Help text for fix.
76 76
77 77 $ hg help fix
78 78 hg fix [OPTION]... [FILE]...
79 79
80 80 rewrite file content in changesets or working directory
81 81
82 82 Runs any configured tools to fix the content of files. Only affects files
83 83 with changes, unless file arguments are provided. Only affects changed
84 84 lines of files, unless the --whole flag is used. Some tools may always
85 85 affect the whole file regardless of --whole.
86 86
87 87 If revisions are specified with --rev, those revisions will be checked,
88 88 and they may be replaced with new revisions that have fixed file content.
89 89 It is desirable to specify all descendants of each specified revision, so
90 90 that the fixes propagate to the descendants. If all descendants are fixed
91 91 at the same time, no merging, rebasing, or evolution will be required.
92 92
93 93 If --working-dir is used, files with uncommitted changes in the working
94 94 copy will be fixed. If the checked-out revision is also fixed, the working
95 95 directory will update to the replacement revision.
96 96
97 97 When determining what lines of each file to fix at each revision, the
98 98 whole set of revisions being fixed is considered, so that fixes to earlier
99 99 revisions are not forgotten in later ones. The --base flag can be used to
100 100 override this default behavior, though it is not usually desirable to do
101 101 so.
102 102
103 103 (use 'hg help -e fix' to show help for the fix extension)
104 104
105 105 options ([+] can be repeated):
106 106
107 107 --all fix all non-public non-obsolete revisions
108 108 --base REV [+] revisions to diff against (overrides automatic selection,
109 109 and applies to every revision being fixed)
110 110 -r --rev REV [+] revisions to fix
111 111 -w --working-dir fix the working directory
112 112 --whole always fix every line of a file
113 113
114 114 (some details hidden, use --verbose to show complete help)
115 115
116 116 $ hg help -e fix
117 117 fix extension - rewrite file content in changesets or working copy
118 118 (EXPERIMENTAL)
119 119
120 120 Provides a command that runs configured tools on the contents of modified
121 121 files, writing back any fixes to the working copy or replacing changesets.
122 122
123 123 Here is an example configuration that causes 'hg fix' to apply automatic
124 124 formatting fixes to modified lines in C++ code:
125 125
126 126 [fix]
127 127 clang-format:command=clang-format --assume-filename={rootpath}
128 128 clang-format:linerange=--lines={first}:{last}
129 129 clang-format:pattern=set:**.cpp or **.hpp
130 130
131 131 The :command suboption forms the first part of the shell command that will be
132 132 used to fix a file. The content of the file is passed on standard input, and
133 133 the fixed file content is expected on standard output. Any output on standard
134 134 error will be displayed as a warning. If the exit status is not zero, the file
135 135 will not be affected. A placeholder warning is displayed if there is a non-
136 136 zero exit status but no standard error output. Some values may be substituted
137 137 into the command:
138 138
139 139 {rootpath} The path of the file being fixed, relative to the repo root
140 140 {basename} The name of the file being fixed, without the directory path
141 141
142 142 If the :linerange suboption is set, the tool will only be run if there are
143 143 changed lines in a file. The value of this suboption is appended to the shell
144 144 command once for every range of changed lines in the file. Some values may be
145 145 substituted into the command:
146 146
147 147 {first} The 1-based line number of the first line in the modified range
148 148 {last} The 1-based line number of the last line in the modified range
149 149
150 150 Deleted sections of a file will be ignored by :linerange, because there is no
151 151 corresponding line range in the version being fixed.
152 152
153 153 By default, tools that set :linerange will only be executed if there is at
154 154 least one changed line range. This is meant to prevent accidents like running
155 155 a code formatter in such a way that it unexpectedly reformats the whole file.
156 156 If such a tool needs to operate on unchanged files, it should set the
157 157 :skipclean suboption to false.
158 158
159 159 The :pattern suboption determines which files will be passed through each
160 160 configured tool. See 'hg help patterns' for possible values. However, all
161 161 patterns are relative to the repo root, even if that text says they are
162 162 relative to the current working directory. If there are file arguments to 'hg
163 163 fix', the intersection of these patterns is used.
164 164
165 165 There is also a configurable limit for the maximum size of file that will be
166 166 processed by 'hg fix':
167 167
168 168 [fix]
169 169 maxfilesize = 2MB
170 170
171 171 Normally, execution of configured tools will continue after a failure
172 172 (indicated by a non-zero exit status). It can also be configured to abort
173 173 after the first such failure, so that no files will be affected if any tool
174 174 fails. This abort will also cause 'hg fix' to exit with a non-zero status:
175 175
176 176 [fix]
177 177 failure = abort
178 178
179 179 When multiple tools are configured to affect a file, they execute in an order
180 180 defined by the :priority suboption. The priority suboption has a default value
181 181 of zero for each tool. Tools are executed in order of descending priority. The
182 182 execution order of tools with equal priority is unspecified. For example, you
183 183 could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
184 184 in a text file by ensuring that 'sort' runs before 'head':
185 185
186 186 [fix]
187 187 sort:command = sort -n
188 188 head:command = head -n 10
189 189 sort:pattern = numbers.txt
190 190 head:pattern = numbers.txt
191 191 sort:priority = 2
192 192 head:priority = 1
193 193
194 194 To account for changes made by each tool, the line numbers used for
195 195 incremental formatting are recomputed before executing the next tool. So, each
196 196 tool may see different values for the arguments added by the :linerange
197 197 suboption.
198 198
199 199 Each fixer tool is allowed to return some metadata in addition to the fixed
200 200 file content. The metadata must be placed before the file content on stdout,
201 201 separated from the file content by a zero byte. The metadata is parsed as a
202 202 JSON value (so, it should be UTF-8 encoded and contain no zero bytes). A fixer
203 203 tool is expected to produce this metadata encoding if and only if the
204 204 :metadata suboption is true:
205 205
206 206 [fix]
207 207 tool:command = tool --prepend-json-metadata
208 208 tool:metadata = true
209 209
210 210 The metadata values are passed to hooks, which can be used to print summaries
211 211 or perform other post-fixing work. The supported hooks are:
212 212
213 213 "postfixfile"
214 214 Run once for each file in each revision where any fixer tools made changes
215 215 to the file content. Provides "$HG_REV" and "$HG_PATH" to identify the file,
216 216 and "$HG_METADATA" with a map of fixer names to metadata values from fixer
217 217 tools that affected the file. Fixer tools that didn't affect the file have a
218 218 value of None. Only fixer tools that executed are present in the metadata.
219 219
220 220 "postfix"
221 221 Run once after all files and revisions have been handled. Provides
222 222 "$HG_REPLACEMENTS" with information about what revisions were created and
223 223 made obsolete. Provides a boolean "$HG_WDIRWRITTEN" to indicate whether any
224 224 files in the working copy were updated. Provides a list "$HG_METADATA"
225 225 mapping fixer tool names to lists of metadata values returned from
226 226 executions that modified a file. This aggregates the same metadata
227 227 previously passed to the "postfixfile" hook.
228 228
229 229 Fixer tools are run in the repository's root directory. This allows them to
230 230 read configuration files from the working copy, or even write to the working
231 231 copy. The working copy is not updated to match the revision being fixed. In
232 232 fact, several revisions may be fixed in parallel. Writes to the working copy
233 233 are not amended into the revision being fixed; fixer tools should always write
234 234 fixed file content back to stdout as documented above.
235 235
236 236 list of commands:
237 237
238 238 fix rewrite file content in changesets or working directory
239 239
240 240 (use 'hg help -v -e fix' to show built-in aliases and global options)
241 241
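As a concrete illustration of the fixer protocol described in the help text above (file content on stdin, fixed content on stdout, and, when :metadata is true, a JSON value followed by a zero byte before the content), a minimal hypothetical fixer might look like this; the flag name and metadata keys are invented for the example:

  import json
  import sys

  def main():
      content = sys.stdin.buffer.read()
      fixed = content.upper()
      if '--metadata' in sys.argv[1:]:
          # The JSON metadata comes first, then a zero byte, then the
          # fixed file content, as the :metadata suboption requires.
          meta = {'changed': fixed != content}  # hypothetical keys
          sys.stdout.buffer.write(json.dumps(meta).encode('utf-8'))
          sys.stdout.buffer.write(b'\0')
      sys.stdout.buffer.write(fixed)
      return 0  # a non-zero status would make 'hg fix' discard the output

  if __name__ == '__main__':
      sys.exit(main())

Such a tool could be wired up with suboptions along the lines of tool:command, tool:metadata = true, and tool:pattern, as in the configuration examples above.
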
242 242 There is no default behavior in the absence of --rev and --working-dir.
243 243
244 244 $ hg init badusage
245 245 $ cd badusage
246 246
247 247 $ hg fix
248 248 abort: no changesets specified
249 249 (use --rev or --working-dir)
250 250 [255]
251 251 $ hg fix --whole
252 252 abort: no changesets specified
253 253 (use --rev or --working-dir)
254 254 [255]
255 255 $ hg fix --base 0
256 256 abort: no changesets specified
257 257 (use --rev or --working-dir)
258 258 [255]
259 259
260 260 Fixing a public revision isn't allowed. It should abort early enough that
261 261 nothing happens, even to the working directory.
262 262
263 263 $ printf "hello\n" > hello.whole
264 264 $ hg commit -Aqm "hello"
265 265 $ hg phase -r 0 --public
266 266 $ hg fix -r 0
267 267 abort: cannot fix public changesets
268 268 (see 'hg help phases' for details)
269 269 [255]
270 270 $ hg fix -r 0 --working-dir
271 271 abort: cannot fix public changesets
272 272 (see 'hg help phases' for details)
273 273 [255]
274 274 $ hg cat -r tip hello.whole
275 275 hello
276 276 $ cat hello.whole
277 277 hello
278 278
279 279 $ cd ..
280 280
281 281 Fixing a clean working directory should do nothing. Even the --whole flag
282 282 shouldn't cause any clean files to be fixed. Specifying a clean file explicitly
283 283 should only fix it if the fixer always fixes the whole file. The combination of
284 284 an explicit filename and --whole should format the entire file regardless.
285 285
286 286 $ hg init fixcleanwdir
287 287 $ cd fixcleanwdir
288 288
289 289 $ printf "hello\n" > hello.changed
290 290 $ printf "world\n" > hello.whole
291 291 $ hg commit -Aqm "foo"
292 292 $ hg fix --working-dir
293 293 $ hg diff
294 294 $ hg fix --working-dir --whole
295 295 $ hg diff
296 296 $ hg fix --working-dir *
297 297 $ cat *
298 298 hello
299 299 WORLD
300 300 $ hg revert --all --no-backup
301 301 reverting hello.whole
302 302 $ hg fix --working-dir * --whole
303 303 $ cat *
304 304 HELLO
305 305 WORLD
306 306
307 307 The same ideas apply to fixing a revision, so we create a revision that doesn't
308 308 modify either of the files in question and try fixing it. This also tests that
309 309 we ignore a file that doesn't match any configured fixer.
310 310
311 311 $ hg revert --all --no-backup
312 312 reverting hello.changed
313 313 reverting hello.whole
314 314 $ printf "unimportant\n" > some.file
315 315 $ hg commit -Aqm "some other file"
316 316
317 317 $ hg fix -r .
318 318 $ hg cat -r tip *
319 319 hello
320 320 world
321 321 unimportant
322 322 $ hg fix -r . --whole
323 323 $ hg cat -r tip *
324 324 hello
325 325 world
326 326 unimportant
327 327 $ hg fix -r . *
328 328 $ hg cat -r tip *
329 329 hello
330 330 WORLD
331 331 unimportant
332 332 $ hg fix -r . * --whole --config experimental.evolution.allowdivergence=true
333 333 2 new content-divergent changesets
334 334 $ hg cat -r tip *
335 335 HELLO
336 336 WORLD
337 337 unimportant
338 338
339 339 $ cd ..
340 340
341 341 Fixing the working directory should still work if there are no revisions.
342 342
343 343 $ hg init norevisions
344 344 $ cd norevisions
345 345
346 346 $ printf "something\n" > something.whole
347 347 $ hg add
348 348 adding something.whole
349 349 $ hg fix --working-dir
350 350 $ cat something.whole
351 351 SOMETHING
352 352
353 353 $ cd ..
354 354
355 355 Test the effect of fixing the working directory for each possible status, with
356 356 and without providing explicit file arguments.
357 357
358 358 $ hg init implicitlyfixstatus
359 359 $ cd implicitlyfixstatus
360 360
361 361 $ printf "modified\n" > modified.whole
362 362 $ printf "removed\n" > removed.whole
363 363 $ printf "deleted\n" > deleted.whole
364 364 $ printf "clean\n" > clean.whole
365 365 $ printf "ignored.whole" > .hgignore
366 366 $ hg commit -Aqm "stuff"
367 367
368 368 $ printf "modified!!!\n" > modified.whole
369 369 $ printf "unknown\n" > unknown.whole
370 370 $ printf "ignored\n" > ignored.whole
371 371 $ printf "added\n" > added.whole
372 372 $ hg add added.whole
373 373 $ hg remove removed.whole
374 374 $ rm deleted.whole
375 375
376 376 $ hg status --all
377 377 M modified.whole
378 378 A added.whole
379 379 R removed.whole
380 380 ! deleted.whole
381 381 ? unknown.whole
382 382 I ignored.whole
383 383 C .hgignore
384 384 C clean.whole
385 385
386 386 $ hg fix --working-dir
387 387
388 388 $ hg status --all
389 389 M modified.whole
390 390 A added.whole
391 391 R removed.whole
392 392 ! deleted.whole
393 393 ? unknown.whole
394 394 I ignored.whole
395 395 C .hgignore
396 396 C clean.whole
397 397
398 398 $ cat *.whole
399 399 ADDED
400 400 clean
401 401 ignored
402 402 MODIFIED!!!
403 403 unknown
404 404
405 405 $ printf "modified!!!\n" > modified.whole
406 406 $ printf "added\n" > added.whole
407 407
408 408 Listing the files explicitly causes untracked files to also be fixed, but
409 409 ignored files are still unaffected.
410 410
411 411 $ hg fix --working-dir *.whole
412 412
413 413 $ hg status --all
414 414 M clean.whole
415 415 M modified.whole
416 416 A added.whole
417 417 R removed.whole
418 418 ! deleted.whole
419 419 ? unknown.whole
420 420 I ignored.whole
421 421 C .hgignore
422 422
423 423 $ cat *.whole
424 424 ADDED
425 425 CLEAN
426 426 ignored
427 427 MODIFIED!!!
428 428 UNKNOWN
429 429
430 430 $ cd ..
431 431
432 432 Test that incremental fixing works on files with additions, deletions, and
433 433 changes in multiple line ranges. Note that deletions do not generally cause
434 434 neighboring lines to be fixed, so we don't return a line range for purely
435 435 deleted sections. In the future we should support a :deletion config that
436 436 allows fixers to know where deletions are located.
437 437
438 438 $ hg init incrementalfixedlines
439 439 $ cd incrementalfixedlines
440 440
441 441 $ printf "a\nb\nc\nd\ne\nf\ng\n" > foo.txt
442 442 $ hg commit -Aqm "foo"
443 443 $ printf "zz\na\nc\ndd\nee\nff\nf\ngg\n" > foo.txt
444 444
445 445 $ hg --config "fix.fail:command=echo" \
446 446 > --config "fix.fail:linerange={first}:{last}" \
447 447 > --config "fix.fail:pattern=foo.txt" \
448 448 > fix --working-dir
449 449 $ cat foo.txt
450 450 1:1 4:6 8:8
451 451
452 452 $ cd ..
453 453
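The ranges above (1:1 4:6 8:8) come from substituting {first} and {last} once per changed region. A rough approximation of that computation, using difflib rather than Mercurial's internal diff machinery, could be:

  import difflib

  def changed_ranges(old_lines, new_lines):
      # Yield 1-based (first, last) ranges of lines in new_lines that
      # were inserted or replaced. Pure deletions yield nothing, matching
      # the :linerange behavior described earlier.
      sm = difflib.SequenceMatcher(a=old_lines, b=new_lines)
      for tag, i1, i2, j1, j2 in sm.get_opcodes():
          if tag in ('replace', 'insert') and j2 > j1:
              yield (j1 + 1, j2)

  old = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
  new = ['zz', 'a', 'c', 'dd', 'ee', 'ff', 'f', 'gg']
  print(list(changed_ranges(old, new)))  # [(1, 1), (4, 6), (8, 8)]

The exact ranges can differ between diff algorithms; this only illustrates the shape of the data the tool receives.
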
454 454 Test that --whole fixes all lines regardless of the diffs present.
455 455
456 456 $ hg init wholeignoresdiffs
457 457 $ cd wholeignoresdiffs
458 458
459 459 $ printf "a\nb\nc\nd\ne\nf\ng\n" > foo.changed
460 460 $ hg commit -Aqm "foo"
461 461 $ printf "zz\na\nc\ndd\nee\nff\nf\ngg\n" > foo.changed
462 462
463 463 $ hg fix --working-dir
464 464 $ cat foo.changed
465 465 ZZ
466 466 a
467 467 c
468 468 DD
469 469 EE
470 470 FF
471 471 f
472 472 GG
473 473
474 474 $ hg fix --working-dir --whole
475 475 $ cat foo.changed
476 476 ZZ
477 477 A
478 478 C
479 479 DD
480 480 EE
481 481 FF
482 482 F
483 483 GG
484 484
485 485 $ cd ..
486 486
487 487 We should do nothing with symlinks, and their targets should be unaffected. Any
488 488 other behavior would be more complicated to implement and harder to document.
489 489
490 490 #if symlink
491 491 $ hg init dontmesswithsymlinks
492 492 $ cd dontmesswithsymlinks
493 493
494 494 $ printf "hello\n" > hello.whole
495 495 $ ln -s hello.whole hellolink
496 496 $ hg add
497 497 adding hello.whole
498 498 adding hellolink
499 499 $ hg fix --working-dir hellolink
500 500 $ hg status
501 501 A hello.whole
502 502 A hellolink
503 503
504 504 $ cd ..
505 505 #endif
506 506
507 507 We should allow fixers to run on binary files, even though this doesn't sound
508 508 like a common use case. There's not much benefit to disallowing it, and users
509 509 can add "and not binary()" to their filesets if needed. The Mercurial
510 510 philosophy is generally to not handle binary files specially anyway.
511 511
512 512 $ hg init cantouchbinaryfiles
513 513 $ cd cantouchbinaryfiles
514 514
515 515 $ printf "hello\0\n" > hello.whole
516 516 $ hg add
517 517 adding hello.whole
518 518 $ hg fix --working-dir 'set:binary()'
519 519 $ cat hello.whole
520 520 HELLO\x00 (esc)
521 521
522 522 $ cd ..
523 523
524 524 We have a config for the maximum size of file we will attempt to fix. This can
525 525 be helpful to avoid running unsuspecting fixer tools on huge inputs, which
526 526 could happen by accident without a well considered configuration. A more
527 527 precise configuration could use the size() fileset function if one global limit
528 528 is undesired.
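
For example, a hypothetical configuration could keep the global cap while opting one tool out of large files through its pattern (tool name invented):

  [fix]
  maxfilesize = 2MB
  mytool:pattern = set:**.txt and size('<100KB')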
529 529
530 530 $ hg init maxfilesize
531 531 $ cd maxfilesize
532 532
533 533 $ printf "this file is huge\n" > hello.whole
534 534 $ hg add
535 535 adding hello.whole
536 536 $ hg --config fix.maxfilesize=10 fix --working-dir
537 537 ignoring file larger than 10 bytes: hello.whole
538 538 $ cat hello.whole
539 539 this file is huge
540 540
541 541 $ cd ..
542 542
543 543 If we specify a file to fix, other files should be left alone, even if they
544 544 have changes.
545 545
546 546 $ hg init fixonlywhatitellyouto
547 547 $ cd fixonlywhatitellyouto
548 548
549 549 $ printf "fix me!\n" > fixme.whole
550 550 $ printf "not me.\n" > notme.whole
551 551 $ hg add
552 552 adding fixme.whole
553 553 adding notme.whole
554 554 $ hg fix --working-dir fixme.whole
555 555 $ cat *.whole
556 556 FIX ME!
557 557 not me.
558 558
559 559 $ cd ..
560 560
561 561 If we try to fix a missing file, we still fix other files.
562 562
563 563 $ hg init fixmissingfile
564 564 $ cd fixmissingfile
565 565
566 566 $ printf "fix me!\n" > foo.whole
567 567 $ hg add
568 568 adding foo.whole
569 569 $ hg fix --working-dir foo.whole bar.whole
570 570 bar.whole: $ENOENT$
571 571 $ cat *.whole
572 572 FIX ME!
573 573
574 574 $ cd ..
575 575
576 576 Specifying a directory name should fix all its files and subdirectories.
577 577
578 578 $ hg init fixdirectory
579 579 $ cd fixdirectory
580 580
581 581 $ mkdir -p dir1/dir2
582 582 $ printf "foo\n" > foo.whole
583 583 $ printf "bar\n" > dir1/bar.whole
584 584 $ printf "baz\n" > dir1/dir2/baz.whole
585 585 $ hg add
586 586 adding dir1/bar.whole
587 587 adding dir1/dir2/baz.whole
588 588 adding foo.whole
589 589 $ hg fix --working-dir dir1
590 590 $ cat foo.whole dir1/bar.whole dir1/dir2/baz.whole
591 591 foo
592 592 BAR
593 593 BAZ
594 594
595 595 $ cd ..
596 596
597 597 Fixing a file in the working directory that needs no fixes should not actually
598 598 write back to the file, so for example the mtime shouldn't change.
599 599
600 600 $ hg init donttouchunfixedfiles
601 601 $ cd donttouchunfixedfiles
602 602
603 603 $ printf "NO FIX NEEDED\n" > foo.whole
604 604 $ hg add
605 605 adding foo.whole
606 606 $ cp -p foo.whole foo.whole.orig
607 607 $ cp -p foo.whole.orig foo.whole
608 608 $ sleep 2 # mtime has a resolution of one or two seconds.
609 609 $ hg fix --working-dir
610 610 $ f foo.whole.orig --newer foo.whole
611 611 foo.whole.orig: newer than foo.whole
612 612
613 613 $ cd ..
614 614
615 615 When a fixer prints to stderr, we don't assume that it has failed. We show the
616 616 error messages to the user, and we still let the fixer affect the file it was
617 617 fixing if its exit code is zero. Some code formatters might emit error messages
618 618 on stderr and nothing on stdout, which would cause us to clear the file,
619 619 except that they also exit with a non-zero code. We show the user which fixer
620 620 emitted the stderr, and which revision, but we assume that the fixer will print
621 621 the filename if it is relevant (since the issue may be non-specific). There is
622 622 also a config to abort (without affecting any files whatsoever) if we see any
623 623 tool with a non-zero exit status.
624 624
625 625 $ hg init showstderr
626 626 $ cd showstderr
627 627
628 628 $ printf "hello\n" > hello.txt
629 629 $ hg add
630 630 adding hello.txt
631 631 $ cat > $TESTTMP/work.sh <<'EOF'
632 632 > printf 'HELLO\n'
633 633 > printf "$@: some\nerror that didn't stop the tool" >&2
634 634 > exit 0 # success despite the stderr output
635 635 > EOF
636 636 $ hg --config "fix.work:command=sh $TESTTMP/work.sh {rootpath}" \
637 637 > --config "fix.work:pattern=hello.txt" \
638 638 > fix --working-dir
639 639 [wdir] work: hello.txt: some
640 640 [wdir] work: error that didn't stop the tool
641 641 $ cat hello.txt
642 642 HELLO
643 643
644 644 $ printf "goodbye\n" > hello.txt
645 645 $ printf "foo\n" > foo.whole
646 646 $ hg add
647 647 adding foo.whole
648 648 $ cat > $TESTTMP/fail.sh <<'EOF'
649 649 > printf 'GOODBYE\n'
650 650 > printf "$@: some\nerror that did stop the tool\n" >&2
651 651 > exit 42 # failure despite the stdout output
652 652 > EOF
653 653 $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
654 654 > --config "fix.fail:pattern=hello.txt" \
655 655 > --config "fix.failure=abort" \
656 656 > fix --working-dir
657 657 [wdir] fail: hello.txt: some
658 658 [wdir] fail: error that did stop the tool
659 659 abort: no fixes will be applied
660 660 (use --config fix.failure=continue to apply any successful fixes anyway)
661 661 [255]
662 662 $ cat hello.txt
663 663 goodbye
664 664 $ cat foo.whole
665 665 foo
666 666
667 667 $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
668 668 > --config "fix.fail:pattern=hello.txt" \
669 669 > fix --working-dir
670 670 [wdir] fail: hello.txt: some
671 671 [wdir] fail: error that did stop the tool
672 672 $ cat hello.txt
673 673 goodbye
674 674 $ cat foo.whole
675 675 FOO
676 676
677 677 $ hg --config "fix.fail:command=exit 42" \
678 678 > --config "fix.fail:pattern=hello.txt" \
679 679 > fix --working-dir
680 680 [wdir] fail: exited with status 42
681 681
682 682 $ cd ..
683 683
684 684 Fixing the working directory and its parent revision at the same time should
685 685 check out the replacement revision for the parent. This prevents any new
686 686 uncommitted changes from appearing. We test this for a clean working directory
687 687 and a dirty one. In both cases, all lines/files changed since the grandparent
688 688 will be fixed. The grandparent is the "baserev" for both the parent and the
689 689 working copy.
690 690
691 691 $ hg init fixdotandcleanwdir
692 692 $ cd fixdotandcleanwdir
693 693
694 694 $ printf "hello\n" > hello.whole
695 695 $ printf "world\n" > world.whole
696 696 $ hg commit -Aqm "the parent commit"
697 697
698 698 $ hg parents --template '{rev} {desc}\n'
699 699 0 the parent commit
700 700 $ hg fix --working-dir -r .
701 701 $ hg parents --template '{rev} {desc}\n'
702 702 1 the parent commit
703 703 $ hg cat -r . *.whole
704 704 HELLO
705 705 WORLD
706 706 $ cat *.whole
707 707 HELLO
708 708 WORLD
709 709 $ hg status
710 710
711 711 $ cd ..
712 712
713 713 Same test with a dirty working copy.
714 714
715 715 $ hg init fixdotanddirtywdir
716 716 $ cd fixdotanddirtywdir
717 717
718 718 $ printf "hello\n" > hello.whole
719 719 $ printf "world\n" > world.whole
720 720 $ hg commit -Aqm "the parent commit"
721 721
722 722 $ printf "hello,\n" > hello.whole
723 723 $ printf "world!\n" > world.whole
724 724
725 725 $ hg parents --template '{rev} {desc}\n'
726 726 0 the parent commit
727 727 $ hg fix --working-dir -r .
728 728 $ hg parents --template '{rev} {desc}\n'
729 729 1 the parent commit
730 730 $ hg cat -r . *.whole
731 731 HELLO
732 732 WORLD
733 733 $ cat *.whole
734 734 HELLO,
735 735 WORLD!
736 736 $ hg status
737 737 M hello.whole
738 738 M world.whole
739 739
740 740 $ cd ..
741 741
742 742 When we have a chain of commits that change mutually exclusive lines of code,
743 743 we should be able to do incremental fixing that causes each commit in the chain
744 744 to include fixes made to the previous commits. This prevents children from
745 745 backing out the fixes made in their parents. A dirty working directory is
746 746 conceptually similar to another commit in the chain.
747 747
748 748 $ hg init incrementallyfixchain
749 749 $ cd incrementallyfixchain
750 750
751 751 $ cat > file.changed <<EOF
752 752 > first
753 753 > second
754 754 > third
755 755 > fourth
756 756 > fifth
757 757 > EOF
758 758 $ hg commit -Aqm "the common ancestor (the baserev)"
759 759 $ cat > file.changed <<EOF
760 760 > first (changed)
761 761 > second
762 762 > third
763 763 > fourth
764 764 > fifth
765 765 > EOF
766 766 $ hg commit -Aqm "the first commit to fix"
767 767 $ cat > file.changed <<EOF
768 768 > first (changed)
769 769 > second
770 770 > third (changed)
771 771 > fourth
772 772 > fifth
773 773 > EOF
774 774 $ hg commit -Aqm "the second commit to fix"
775 775 $ cat > file.changed <<EOF
776 776 > first (changed)
777 777 > second
778 778 > third (changed)
779 779 > fourth
780 780 > fifth (changed)
781 781 > EOF
782 782
783 783 $ hg fix -r . -r '.^' --working-dir
784 784
785 785 $ hg parents --template '{rev}\n'
786 786 4
787 787 $ hg cat -r '.^^' file.changed
788 788 first
789 789 second
790 790 third
791 791 fourth
792 792 fifth
793 793 $ hg cat -r '.^' file.changed
794 794 FIRST (CHANGED)
795 795 second
796 796 third
797 797 fourth
798 798 fifth
799 799 $ hg cat -r . file.changed
800 800 FIRST (CHANGED)
801 801 second
802 802 THIRD (CHANGED)
803 803 fourth
804 804 fifth
805 805 $ cat file.changed
806 806 FIRST (CHANGED)
807 807 second
808 808 THIRD (CHANGED)
809 809 fourth
810 810 FIFTH (CHANGED)
811 811
812 812 $ cd ..
813 813
814 814 If we incrementally fix a merge commit, we should fix any lines that changed
815 815 versus either parent. You could imagine only fixing the intersection or some
816 816 other subset, but this is necessary if either parent is being fixed. It
817 817 prevents us from forgetting fixes made in either parent.
818 818
819 819 $ hg init incrementallyfixmergecommit
820 820 $ cd incrementallyfixmergecommit
821 821
822 822 $ printf "a\nb\nc\n" > file.changed
823 823 $ hg commit -Aqm "ancestor"
824 824
825 825 $ printf "aa\nb\nc\n" > file.changed
826 826 $ hg commit -m "change a"
827 827
828 828 $ hg checkout '.^'
829 829 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
830 830 $ printf "a\nb\ncc\n" > file.changed
831 831 $ hg commit -m "change c"
832 832 created new head
833 833
834 834 $ hg merge
835 835 merging file.changed
836 836 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
837 837 (branch merge, don't forget to commit)
838 838 $ hg commit -m "merge"
839 839 $ hg cat -r . file.changed
840 840 aa
841 841 b
842 842 cc
843 843
844 844 $ hg fix -r . --working-dir
845 845 $ hg cat -r . file.changed
846 846 AA
847 847 b
848 848 CC
849 849
850 850 $ cd ..
851 851
852 852 Abort fixing revisions if there is an unfinished operation. We don't want to
853 853 make things worse by editing files or stripping/obsoleting things. Also abort
854 854 fixing the working directory if there are unresolved merge conflicts.
855 855
856 856 $ hg init abortunresolved
857 857 $ cd abortunresolved
858 858
859 859 $ echo "foo1" > foo.whole
860 860 $ hg commit -Aqm "foo 1"
861 861
862 862 $ hg update null
863 863 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
864 864 $ echo "foo2" > foo.whole
865 865 $ hg commit -Aqm "foo 2"
866 866
867 867 $ hg --config extensions.rebase= rebase -r 1 -d 0
868 868 rebasing 1:c3b6dc0e177a "foo 2" (tip)
869 869 merging foo.whole
870 870 warning: conflicts while merging foo.whole! (edit, then use 'hg resolve --mark')
871 871 unresolved conflicts (see hg resolve, then hg rebase --continue)
872 872 [1]
873 873
874 874 $ hg --config extensions.rebase= fix --working-dir
875 875 abort: unresolved conflicts
876 876 (use 'hg resolve')
877 877 [255]
878 878
879 879 $ hg --config extensions.rebase= fix -r .
880 880 abort: rebase in progress
881 881 (use 'hg rebase --continue' or 'hg rebase --abort')
882 882 [255]
883 883
884 884 $ cd ..
885 885
886 886 When fixing a file that was renamed, we should diff against the source of the
887 887 rename for incremental fixing and we should correctly reproduce the rename in
888 888 the replacement revision.
889 889
890 890 $ hg init fixrenamecommit
891 891 $ cd fixrenamecommit
892 892
893 893 $ printf "a\nb\nc\n" > source.changed
894 894 $ hg commit -Aqm "source revision"
895 895 $ hg move source.changed dest.changed
896 896 $ printf "a\nb\ncc\n" > dest.changed
897 897 $ hg commit -m "dest revision"
898 898
899 899 $ hg fix -r .
900 900 $ hg log -r tip --copies --template "{file_copies}\n"
901 901 dest.changed (source.changed)
902 902 $ hg cat -r tip dest.changed
903 903 a
904 904 b
905 905 CC
906 906
907 907 $ cd ..
908 908
909 909 When fixing revisions that remove files we must ensure that the replacement
910 910 actually removes the file, whereas it could accidentally leave it unchanged or
911 911 write an empty string to it.
912 912
913 913 $ hg init fixremovedfile
914 914 $ cd fixremovedfile
915 915
916 916 $ printf "foo\n" > foo.whole
917 917 $ printf "bar\n" > bar.whole
918 918 $ hg commit -Aqm "add files"
919 919 $ hg remove bar.whole
920 920 $ hg commit -m "remove file"
921 921 $ hg status --change .
922 922 R bar.whole
923 923 $ hg fix -r . foo.whole
924 924 $ hg status --change tip
925 925 M foo.whole
926 926 R bar.whole
927 927
928 928 $ cd ..
929 929
930 930 If fixing a revision finds no fixes to make, no replacement revision should be
931 931 created.
932 932
933 933 $ hg init nofixesneeded
934 934 $ cd nofixesneeded
935 935
936 936 $ printf "FOO\n" > foo.whole
937 937 $ hg commit -Aqm "add file"
938 938 $ hg log --template '{rev}\n'
939 939 0
940 940 $ hg fix -r .
941 941 $ hg log --template '{rev}\n'
942 942 0
943 943
944 944 $ cd ..
945 945
946 946 If fixing a commit reverts all the changes in the commit, we replace it with a
947 947 commit that changes no files.
948 948
949 949 $ hg init nochangesleft
950 950 $ cd nochangesleft
951 951
952 952 $ printf "FOO\n" > foo.whole
953 953 $ hg commit -Aqm "add file"
954 954 $ printf "foo\n" > foo.whole
955 955 $ hg commit -m "edit file"
956 956 $ hg status --change .
957 957 M foo.whole
958 958 $ hg fix -r .
959 959 $ hg status --change tip
960 960
961 961 $ cd ..
962 962
963 963 If we fix a parent and child revision together, the child revision must be
964 964 replaced if the parent is replaced, even if the diffs of the child needed no
965 965 fixes. However, we're free to not replace revisions that need no fixes and have
966 966 no ancestors that are replaced.
967 967
968 968 $ hg init mustreplacechild
969 969 $ cd mustreplacechild
970 970
971 971 $ printf "FOO\n" > foo.whole
972 972 $ hg commit -Aqm "add foo"
973 973 $ printf "foo\n" > foo.whole
974 974 $ hg commit -m "edit foo"
975 975 $ printf "BAR\n" > bar.whole
976 976 $ hg commit -Aqm "add bar"
977 977
978 978 $ hg log --graph --template '{rev} {files}'
979 979 @ 2 bar.whole
980 980 |
981 981 o 1 foo.whole
982 982 |
983 983 o 0 foo.whole
984 984
985 985 $ hg fix -r 0:2
986 986 $ hg log --graph --template '{rev} {files}'
987 987 o 4 bar.whole
988 988 |
989 989 o 3
990 990 |
991 991 | @ 2 bar.whole
992 992 | |
993 993 | x 1 foo.whole
994 994 |/
995 995 o 0 foo.whole
996 996
997 997
998 998 $ cd ..
999 999
1000 1000 It's also possible that the child needs absolutely no changes, but we still
1001 1001 need to replace it to update its parent. If we skipped replacing the child
1002 1002 because it had no file content changes, it would become an orphan for no good
1003 1003 reason.
1004 1004
1005 1005 $ hg init mustreplacechildevenifnop
1006 1006 $ cd mustreplacechildevenifnop
1007 1007
1008 1008 $ printf "Foo\n" > foo.whole
1009 1009 $ hg commit -Aqm "add a bad foo"
1010 1010 $ printf "FOO\n" > foo.whole
1011 1011 $ hg commit -m "add a good foo"
1012 1012 $ hg fix -r . -r '.^'
1013 1013 $ hg log --graph --template '{rev} {desc}'
1014 1014 o 3 add a good foo
1015 1015 |
1016 1016 o 2 add a bad foo
1017 1017
1018 1018 @ 1 add a good foo
1019 1019 |
1020 1020 x 0 add a bad foo
1021 1021
1022 1022
1023 1023 $ cd ..
1024 1024
1025 1025 Similar to the case above, the child revision may become empty as a result of
1026 1026 fixing its parent. We should still create an empty replacement child.
1027 1027 TODO: determine how this should interact with ui.allowemptycommit given that
1028 1028 the empty replacement could have children.
1029 1029
1030 1030 $ hg init mustreplacechildevenifempty
1031 1031 $ cd mustreplacechildevenifempty
1032 1032
1033 1033 $ printf "foo\n" > foo.whole
1034 1034 $ hg commit -Aqm "add foo"
1035 1035 $ printf "Foo\n" > foo.whole
1036 1036 $ hg commit -m "edit foo"
1037 1037 $ hg fix -r . -r '.^'
1038 1038 $ hg log --graph --template '{rev} {desc}\n' --stat
1039 1039 o 3 edit foo
1040 1040 |
1041 1041 o 2 add foo
1042 1042 foo.whole | 1 +
1043 1043 1 files changed, 1 insertions(+), 0 deletions(-)
1044 1044
1045 1045 @ 1 edit foo
1046 1046 | foo.whole | 2 +-
1047 1047 | 1 files changed, 1 insertions(+), 1 deletions(-)
1048 1048 |
1049 1049 x 0 add foo
1050 1050 foo.whole | 1 +
1051 1051 1 files changed, 1 insertions(+), 0 deletions(-)
1052 1052
1053 1053
1054 1054 $ cd ..
1055 1055
1056 1056 Fixing a secret commit should replace it with another secret commit.
1057 1057
1058 1058 $ hg init fixsecretcommit
1059 1059 $ cd fixsecretcommit
1060 1060
1061 1061 $ printf "foo\n" > foo.whole
1062 1062 $ hg commit -Aqm "add foo" --secret
1063 1063 $ hg fix -r .
1064 1064 $ hg log --template '{rev} {phase}\n'
1065 1065 1 secret
1066 1066 0 secret
1067 1067
1068 1068 $ cd ..
1069 1069
1070 1070 We should also preserve phase when fixing a draft commit while the user has
1071 1071 their default set to secret.
1072 1072
1073 1073 $ hg init respectphasesnewcommit
1074 1074 $ cd respectphasesnewcommit
1075 1075
1076 1076 $ printf "foo\n" > foo.whole
1077 1077 $ hg commit -Aqm "add foo"
1078 1078 $ hg --config phases.newcommit=secret fix -r .
1079 1079 $ hg log --template '{rev} {phase}\n'
1080 1080 1 draft
1081 1081 0 draft
1082 1082
1083 1083 $ cd ..
1084 1084
1085 1085 Debug output should show what fixer commands are being subprocessed, which is
1086 1086 useful for anyone trying to set up a new config.
1087 1087
1088 1088 $ hg init debugoutput
1089 1089 $ cd debugoutput
1090 1090
1091 1091 $ printf "foo\nbar\nbaz\n" > foo.changed
1092 1092 $ hg commit -Aqm "foo"
1093 1093 $ printf "Foo\nbar\nBaz\n" > foo.changed
1094 1094 $ hg --debug fix --working-dir
1095 1095 subprocess: * $TESTTMP/uppercase.py 1-1 3-3 (glob)
1096 1096
1097 1097 $ cd ..
1098 1098
1099 1099 Fixing an obsolete revision can cause divergence, so we abort unless the user
1100 1100 configures to allow it. This is not yet smart enough to know whether there is a
1101 1101 successor, but even then it is not likely intentional or idiomatic to fix an
1102 1102 obsolete revision.
1103 1103
1104 1104 $ hg init abortobsoleterev
1105 1105 $ cd abortobsoleterev
1106 1106
1107 1107 $ printf "foo\n" > foo.changed
1108 1108 $ hg commit -Aqm "foo"
1109 1109 $ hg debugobsolete `hg parents --template '{node}'`
1110 1110 1 new obsolescence markers
1111 1111 obsoleted 1 changesets
1112 1112 $ hg --hidden fix -r 0
1113 1113 abort: fixing obsolete revision could cause divergence
1114 1114 [255]
1115 1115
1116 1116 $ hg --hidden fix -r 0 --config experimental.evolution.allowdivergence=true
1117 1117 $ hg cat -r tip foo.changed
1118 1118 FOO
1119 1119
1120 1120 $ cd ..
1121 1121
1122 1122 Test all of the available substitution values for fixer commands.
1123 1123
1124 1124 $ hg init substitution
1125 1125 $ cd substitution
1126 1126
1127 1127 $ mkdir foo
1128 1128 $ printf "hello\ngoodbye\n" > foo/bar
1129 1129 $ hg add
1130 1130 adding foo/bar
1131 1131 $ hg --config "fix.fail:command=printf '%s\n' '{rootpath}' '{basename}'" \
1132 1132 > --config "fix.fail:linerange='{first}' '{last}'" \
1133 1133 > --config "fix.fail:pattern=foo/bar" \
1134 1134 > fix --working-dir
1135 1135 $ cat foo/bar
1136 1136 foo/bar
1137 1137 bar
1138 1138 1
1139 1139 2
1140 1140
1141 1141 $ cd ..
1142 1142
1143 1143 The --base flag should allow picking the revisions to diff against for changed
1144 1144 files and incremental line formatting.
1145 1145
1146 1146 $ hg init baseflag
1147 1147 $ cd baseflag
1148 1148
1149 1149 $ printf "one\ntwo\n" > foo.changed
1150 1150 $ printf "bar\n" > bar.changed
1151 1151 $ hg commit -Aqm "first"
1152 1152 $ printf "one\nTwo\n" > foo.changed
1153 1153 $ hg commit -m "second"
1154 1154 $ hg fix -w --base .
1155 1155 $ hg status
1156 1156 $ hg fix -w --base null
1157 1157 $ cat foo.changed
1158 1158 ONE
1159 1159 TWO
1160 1160 $ cat bar.changed
1161 1161 BAR
1162 1162
1163 1163 $ cd ..
1164 1164
1165 1165 If the user asks to fix the parent of another commit, they are asking to create
1166 1166 an orphan. We must respect experimental.evolution.allowunstable.
1167 1167
1168 1168 $ hg init allowunstable
1169 1169 $ cd allowunstable
1170 1170
1171 1171 $ printf "one\n" > foo.whole
1172 1172 $ hg commit -Aqm "first"
1173 1173 $ printf "two\n" > foo.whole
1174 1174 $ hg commit -m "second"
1175 1175 $ hg --config experimental.evolution.allowunstable=False fix -r '.^'
1176 1176 abort: cannot fix changeset with children
1177 1177 [255]
1178 1178 $ hg fix -r '.^'
1179 1179 1 new orphan changesets
1180 1180 $ hg cat -r 2 foo.whole
1181 1181 ONE
1182 1182
1183 1183 $ cd ..
1184 1184
1185 1185 The --base flag affects the set of files being fixed. So while the --whole flag
1186 1186 makes the base irrelevant for changed line ranges, it still changes the
1187 1187 meaning and effect of the command. In this example, no files or lines are fixed
1188 1188 until we specify the base, but then we do fix unchanged lines.
1189 1189
1190 1190 $ hg init basewhole
1191 1191 $ cd basewhole
1192 1192 $ printf "foo1\n" > foo.changed
1193 1193 $ hg commit -Aqm "first"
1194 1194 $ printf "foo2\n" >> foo.changed
1195 1195 $ printf "bar\n" > bar.changed
1196 1196 $ hg commit -Aqm "second"
1197 1197
1198 1198 $ hg fix --working-dir --whole
1199 1199 $ cat *.changed
1200 1200 bar
1201 1201 foo1
1202 1202 foo2
1203 1203
1204 1204 $ hg fix --working-dir --base 0 --whole
1205 1205 $ cat *.changed
1206 1206 BAR
1207 1207 FOO1
1208 1208 FOO2
1209 1209
1210 1210 $ cd ..
1211 1211
1212 1212 The execution order of tools can be controlled. This example doesn't work if
1213 1213 you sort after truncating, but the config defines the correct order even though the
1214 1214 definitions appear out of order (which might otherwise imply the incorrect
1215 1215 execution order, depending on the implementation of fix). The goal is to use multiple tools to select the lowest
1216 1216 5 numbers in the file.
1217 1217
1218 1218 $ hg init priorityexample
1219 1219 $ cd priorityexample
1220 1220
1221 1221 $ cat >> .hg/hgrc <<EOF
1222 1222 > [fix]
1223 1223 > head:command = head -n 5
1224 1224 > head:pattern = numbers.txt
1225 1225 > head:priority = 1
1226 1226 > sort:command = sort -n
1227 1227 > sort:pattern = numbers.txt
1228 1228 > sort:priority = 2
1229 1229 > EOF
1230 1230
1231 1231 $ printf "8\n2\n3\n6\n7\n4\n9\n5\n1\n0\n" > numbers.txt
1232 1232 $ hg add -q
1233 1233 $ hg fix -w
1234 1234 $ cat numbers.txt
1235 1235 0
1236 1236 1
1237 1237 2
1238 1238 3
1239 1239 4
1240 1240
1241 1241 And of course we should be able to break this by reversing the execution order.
1242 1242 Test negative priorities while we're at it.
1243 1243
1244 1244 $ cat >> .hg/hgrc <<EOF
1245 1245 > [fix]
1246 1246 > head:priority = -1
1247 1247 > sort:priority = -2
1248 1248 > EOF
1249 1249 $ printf "8\n2\n3\n6\n7\n4\n9\n5\n1\n0\n" > numbers.txt
1250 1250 $ hg fix -w
1251 1251 $ cat numbers.txt
1252 1252 2
1253 1253 3
1254 1254 6
1255 1255 7
1256 1256 8
1257 1257
1258 1258 $ cd ..
1259 1259
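The effect of the two orderings above can be reproduced outside of hg with plain functions; a small hedged sketch:

  def head(lines, n=5):
      return lines[:n]

  def sort_numeric(lines):
      return sorted(lines, key=int)

  nums = '8 2 3 6 7 4 9 5 1 0'.split()
  # sort (higher priority) before head: the five smallest survive.
  print(head(sort_numeric(nums)))  # ['0', '1', '2', '3', '4']
  # Reversed priorities: whatever was in the first five lines, sorted.
  print(sort_numeric(head(nums)))  # ['2', '3', '6', '7', '8']
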
1260 1260 It's possible for repeated applications of a fixer tool to create cycles in the
1261 1261 generated content of a file. For example, two users with different versions of
1262 1262 a code formatter might fight over the formatting when they run hg fix. In the
1263 1263 absence of other changes, this means we could produce commits with the same
1264 1264 hash in subsequent runs of hg fix. This is a problem unless we support
1265 1265 obsolescence cycles well. We avoid this by adding an extra field to the
1266 1266 successor which forces it to have a new hash. That's why this test creates
1267 1267 three revisions instead of two.
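
The two-cycle is easy to see with the tr mapping used below; a hedged illustration:

  # Illustrative only: 'tr ab ba' swaps the two letters, so applying the
  # fixer twice reproduces the original content.
  def swap(s):
      return s.translate(str.maketrans('ab', 'ba'))

  print(swap('ab'))        # 'ba'
  print(swap(swap('ab')))  # 'ab' again; without an extra field on the
                           # successor it would hash like the original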
1268 1268
1269 1269 $ hg init cyclictool
1270 1270 $ cd cyclictool
1271 1271
1272 1272 $ cat >> .hg/hgrc <<EOF
1273 1273 > [fix]
1274 1274 > swapletters:command = tr ab ba
1275 1275 > swapletters:pattern = foo
1276 1276 > EOF
1277 1277
1278 1278 $ echo ab > foo
1279 1279 $ hg commit -Aqm foo
1280 1280
1281 1281 $ hg fix -r 0
1282 1282 $ hg fix -r 1
1283 1283
1284 1284 $ hg cat -r 0 foo --hidden
1285 1285 ab
1286 1286 $ hg cat -r 1 foo --hidden
1287 1287 ba
1288 1288 $ hg cat -r 2 foo
1289 1289 ab
1290 1290
1291 1291 $ cd ..
1292 1292
1293 1293 We run fixer tools in the repo root so they can look for config files or other
1294 1294 important things in the working directory. This does NOT mean we are
1295 1295 reconstructing a working copy of every revision being fixed; we're just giving
1296 1296 the tool knowledge of the repo's location in case it can do something
1297 1297 reasonable with that.
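
In subprocess terms, this amounts to launching the tool with the repo root as its working directory, roughly as follows (a hedged sketch; the command name and paths are hypothetical):

  import subprocess

  repo_root = '/path/to/repo'   # hypothetical repo location
  file_content = b'hello\n'     # content of the file being fixed

  # The tool runs with the repo root as its cwd, so relative lookups
  # (such as a formatter's config file) resolve against the working copy.
  result = subprocess.run(['mytool'], cwd=repo_root,
                          input=file_content, capture_output=True)
  fixed = result.stdout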
1298 1298
1299 1299 $ hg init subprocesscwd
1300 1300 $ cd subprocesscwd
1301 1301
1302 1302 $ cat >> .hg/hgrc <<EOF
1303 1303 > [fix]
1304 1304 > printcwd:command = "$PYTHON" -c "import os; print(os.getcwd())"
1305 1305 > printcwd:pattern = relpath:foo/bar
1306 1306 > filesetpwd:command = "$PYTHON" -c "import os; print('fs: ' + os.getcwd())"
1307 1307 > filesetpwd:pattern = set:**quux
1308 1308 > EOF
1309 1309
1310 1310 $ mkdir foo
1311 1311 $ printf "bar\n" > foo/bar
1312 1312 $ printf "quux\n" > quux
1313 1313 $ hg commit -Aqm blah
1314 1314
1315 1315 $ hg fix -w -r . foo/bar
1316 1316 $ hg cat -r tip foo/bar
1317 1317 $TESTTMP/subprocesscwd
1318 1318 $ cat foo/bar
1319 1319 $TESTTMP/subprocesscwd
1320 1320
1321 1321 $ cd foo
1322 1322
1323 1323 $ hg fix -w -r . bar
1324 1324 $ hg cat -r tip bar ../quux
1325 1325 $TESTTMP/subprocesscwd
1326 1326 quux
1327 1327 $ cat bar ../quux
1328 1328 $TESTTMP/subprocesscwd
1329 1329 quux
1330 1330 $ echo modified > bar
1331 1331 $ hg fix -w bar
1332 1332 $ cat bar
1333 1333 $TESTTMP/subprocesscwd
1334 1334
1335 1335 Apparently fixing p1() and its descendants doesn't include wdir() unless
1336 1336 explicitly stated.
1337 1337
1338 BROKEN: fileset matches aren't relative to repo.root for commits
1339
1340 1338 $ hg fix -r '.::'
1341 1339 $ hg cat -r . ../quux
1342 1340 quux
1343 1341 $ hg cat -r tip ../quux
1344 quux
1342 fs: $TESTTMP/subprocesscwd
1345 1343 $ cat ../quux
1346 1344 quux
1347 1345
1348 1346 Clean files are not fixed unless explicitly named
1349 1347 $ echo 'dirty' > ../quux
1350 1348
1351 BROKEN: fileset matches aren't relative to repo.root for wdir
1352
1353 1349 $ hg fix --working-dir
1354 1350 $ cat ../quux
1355 dirty
1351 fs: $TESTTMP/subprocesscwd
1356 1352
1357 1353 $ cd ../..
1358 1354
1359 1355 Tools configured without a pattern are ignored. It would be too dangerous to
1360 1356 run them on all files, because this might happen while testing a configuration
1361 1357 that also deletes all of the file content. There is no reasonable subset of the
1362 1358 files to use as a default. Users should be explicit about what files are
1363 1359 affected by a tool. This test also confirms that we don't crash when the
1364 1360 pattern config is missing, and that we only warn about it once.
1365 1361
1366 1362 $ hg init nopatternconfigured
1367 1363 $ cd nopatternconfigured
1368 1364
1369 1365 $ printf "foo" > foo
1370 1366 $ printf "bar" > bar
1371 1367 $ hg add -q
1372 1368 $ hg fix --debug --working-dir --config "fix.nopattern:command=echo fixed"
1373 1369 fixer tool has no pattern configuration: nopattern
1374 1370 $ cat foo bar
1375 1371 foobar (no-eol)
1376 1372 $ hg fix --debug --working-dir --config "fix.nocommand:pattern=foo.bar"
1377 1373 fixer tool has no command configuration: nocommand
1378 1374
1379 1375 $ cd ..
1380 1376
1381 1377 Tools can be disabled. Disabled tools do nothing but print a debug message.
1382 1378
1383 1379 $ hg init disabled
1384 1380 $ cd disabled
1385 1381
1386 1382 $ printf "foo\n" > foo
1387 1383 $ hg add -q
1388 1384 $ hg fix --debug --working-dir --config "fix.disabled:command=echo fixed" \
1389 1385 > --config "fix.disabled:pattern=foo" \
1390 1386 > --config "fix.disabled:enabled=false"
1391 1387 ignoring disabled fixer tool: disabled
1392 1388 $ cat foo
1393 1389 foo
1394 1390
1395 1391 $ cd ..
1396 1392
1397 1393 Test that we can configure a fixer to affect all files regardless of the cwd.
1398 1394 The way we invoke matching must not prohibit this.
1399 1395
1400 1396 $ hg init affectallfiles
1401 1397 $ cd affectallfiles
1402 1398
1403 1399 $ mkdir foo bar
1404 1400 $ printf "foo" > foo/file
1405 1401 $ printf "bar" > bar/file
1406 1402 $ printf "baz" > baz_file
1407 1403 $ hg add -q
1408 1404
1409 1405 $ cd bar
1410 1406 $ hg fix --working-dir --config "fix.cooltool:command=echo fixed" \
1411 1407 > --config "fix.cooltool:pattern=glob:**"
1412 1408 $ cd ..
1413 1409
1414 1410 $ cat foo/file
1415 1411 fixed
1416 1412 $ cat bar/file
1417 1413 fixed
1418 1414 $ cat baz_file
1419 1415 fixed
1420 1416
1421 1417 $ cd ..
1422 1418
1423 1419 Tools should be able to run on unchanged files, even if they set :linerange.
1424 1420 This includes a corner case where deleted chunks of a file are not considered
1425 1421 changes.
1426 1422
1427 1423 $ hg init skipclean
1428 1424 $ cd skipclean
1429 1425
1430 1426 $ printf "a\nb\nc\n" > foo
1431 1427 $ printf "a\nb\nc\n" > bar
1432 1428 $ printf "a\nb\nc\n" > baz
1433 1429 $ hg commit -Aqm "base"
1434 1430
1435 1431 $ printf "a\nc\n" > foo
1436 1432 $ printf "a\nx\nc\n" > baz
1437 1433
1438 1434 $ cat >> print.py <<EOF
1439 1435 > import sys
1440 1436 > for a in sys.argv[1:]:
1441 1437 > print(a)
1442 1438 > EOF
1443 1439
1444 1440 $ hg fix --working-dir foo bar baz \
1445 1441 > --config "fix.changedlines:command=\"$PYTHON\" print.py \"Line ranges:\"" \
1446 1442 > --config 'fix.changedlines:linerange="{first} through {last}"' \
1447 1443 > --config 'fix.changedlines:pattern=glob:**' \
1448 1444 > --config 'fix.changedlines:skipclean=false'
1449 1445
1450 1446 $ cat foo
1451 1447 Line ranges:
1452 1448 $ cat bar
1453 1449 Line ranges:
1454 1450 $ cat baz
1455 1451 Line ranges:
1456 1452 2 through 2
1457 1453
1458 1454 $ cd ..
1459 1455
1460 1456 Test various cases around merges. We were previously dropping files if they were
1461 1457 created on only the p2 side of the merge, so let's test permutations of:
1462 1458 * added, was fixed
1463 1459 * added, considered for fixing but was already good
1464 1460 * added, not considered for fixing
1465 1461 * modified, was fixed
1466 1462 * modified, considered for fixing but was already good
1467 1463 * modified, not considered for fixing
1468 1464
1469 1465 Before the bug was fixed where we would drop files, this test demonstrated the
1470 1466 following issues:
1471 1467 * new_in_r1.ignored, new_in_r1_already_good.changed, and
1472 1468 > mod_in_r1_already_good.changed were NOT in the manifest for the merge commit
1473 1469 * mod_in_r1.ignored had its contents from r0, NOT r1.
1474 1470
1475 1471 We're also setting a named branch for every commit to demonstrate that the
1476 1472 branch is kept intact and there aren't issues updating to another branch in the
1477 1473 middle of fix.
1478 1474
1479 1475 $ hg init merge_keeps_files
1480 1476 $ cd merge_keeps_files
1481 1477 $ for f in r0 mod_in_r1 mod_in_r2 mod_in_merge mod_in_child; do
1482 1478 > for c in changed whole ignored; do
1483 1479 > printf "hello\n" > $f.$c
1484 1480 > done
1485 1481 > printf "HELLO\n" > "mod_in_${f}_already_good.changed"
1486 1482 > done
1487 1483 $ hg branch -q r0
1488 1484 $ hg ci -Aqm 'r0'
1489 1485 $ hg phase -p
1490 1486 $ make_test_files() {
1491 1487 > printf "world\n" >> "mod_in_$1.changed"
1492 1488 > printf "world\n" >> "mod_in_$1.whole"
1493 1489 > printf "world\n" >> "mod_in_$1.ignored"
1494 1490 > printf "WORLD\n" >> "mod_in_$1_already_good.changed"
1495 1491 > printf "new in $1\n" > "new_in_$1.changed"
1496 1492 > printf "new in $1\n" > "new_in_$1.whole"
1497 1493 > printf "new in $1\n" > "new_in_$1.ignored"
1498 1494 > printf "ALREADY GOOD, NEW IN THIS REV\n" > "new_in_$1_already_good.changed"
1499 1495 > }
1500 1496 $ make_test_commit() {
1501 1497 > make_test_files "$1"
1502 1498 > hg branch -q "$1"
1503 1499 > hg ci -Aqm "$2"
1504 1500 > }
1505 1501 $ make_test_commit r1 "merge me, pt1"
1506 1502 $ hg co -q ".^"
1507 1503 $ make_test_commit r2 "merge me, pt2"
1508 1504 $ hg merge -qr 1
1509 1505 $ make_test_commit merge "evil merge"
1510 1506 $ make_test_commit child "child of merge"
1511 1507 $ make_test_files wdir
1512 1508 $ hg fix -r 'not public()' -w
1513 1509 $ hg log -G -T'{rev}:{shortest(node,8)}: branch:{branch} desc:{desc}'
1514 1510 @ 8:c22ce900: branch:child desc:child of merge
1515 1511 |
1516 1512 o 7:5a30615a: branch:merge desc:evil merge
1517 1513 |\
1518 1514 | o 6:4e5acdc4: branch:r2 desc:merge me, pt2
1519 1515 | |
1520 1516 o | 5:eea01878: branch:r1 desc:merge me, pt1
1521 1517 |/
1522 1518 o 0:0c548d87: branch:r0 desc:r0
1523 1519
1524 1520 $ hg files -r tip
1525 1521 mod_in_child.changed
1526 1522 mod_in_child.ignored
1527 1523 mod_in_child.whole
1528 1524 mod_in_child_already_good.changed
1529 1525 mod_in_merge.changed
1530 1526 mod_in_merge.ignored
1531 1527 mod_in_merge.whole
1532 1528 mod_in_merge_already_good.changed
1533 1529 mod_in_mod_in_child_already_good.changed
1534 1530 mod_in_mod_in_merge_already_good.changed
1535 1531 mod_in_mod_in_r1_already_good.changed
1536 1532 mod_in_mod_in_r2_already_good.changed
1537 1533 mod_in_r0_already_good.changed
1538 1534 mod_in_r1.changed
1539 1535 mod_in_r1.ignored
1540 1536 mod_in_r1.whole
1541 1537 mod_in_r1_already_good.changed
1542 1538 mod_in_r2.changed
1543 1539 mod_in_r2.ignored
1544 1540 mod_in_r2.whole
1545 1541 mod_in_r2_already_good.changed
1546 1542 new_in_child.changed
1547 1543 new_in_child.ignored
1548 1544 new_in_child.whole
1549 1545 new_in_child_already_good.changed
1550 1546 new_in_merge.changed
1551 1547 new_in_merge.ignored
1552 1548 new_in_merge.whole
1553 1549 new_in_merge_already_good.changed
1554 1550 new_in_r1.changed
1555 1551 new_in_r1.ignored
1556 1552 new_in_r1.whole
1557 1553 new_in_r1_already_good.changed
1558 1554 new_in_r2.changed
1559 1555 new_in_r2.ignored
1560 1556 new_in_r2.whole
1561 1557 new_in_r2_already_good.changed
1562 1558 r0.changed
1563 1559 r0.ignored
1564 1560 r0.whole
1565 1561 $ for f in "$(hg files -r tip)"; do hg cat -r tip $f -T'{path}:\n{data}\n'; done
1566 1562 mod_in_child.changed:
1567 1563 hello
1568 1564 WORLD
1569 1565
1570 1566 mod_in_child.ignored:
1571 1567 hello
1572 1568 world
1573 1569
1574 1570 mod_in_child.whole:
1575 1571 HELLO
1576 1572 WORLD
1577 1573
1578 1574 mod_in_child_already_good.changed:
1579 1575 WORLD
1580 1576
1581 1577 mod_in_merge.changed:
1582 1578 hello
1583 1579 WORLD
1584 1580
1585 1581 mod_in_merge.ignored:
1586 1582 hello
1587 1583 world
1588 1584
1589 1585 mod_in_merge.whole:
1590 1586 HELLO
1591 1587 WORLD
1592 1588
1593 1589 mod_in_merge_already_good.changed:
1594 1590 WORLD
1595 1591
1596 1592 mod_in_mod_in_child_already_good.changed:
1597 1593 HELLO
1598 1594
1599 1595 mod_in_mod_in_merge_already_good.changed:
1600 1596 HELLO
1601 1597
1602 1598 mod_in_mod_in_r1_already_good.changed:
1603 1599 HELLO
1604 1600
1605 1601 mod_in_mod_in_r2_already_good.changed:
1606 1602 HELLO
1607 1603
1608 1604 mod_in_r0_already_good.changed:
1609 1605 HELLO
1610 1606
1611 1607 mod_in_r1.changed:
1612 1608 hello
1613 1609 WORLD
1614 1610
1615 1611 mod_in_r1.ignored:
1616 1612 hello
1617 1613 world
1618 1614
1619 1615 mod_in_r1.whole:
1620 1616 HELLO
1621 1617 WORLD
1622 1618
1623 1619 mod_in_r1_already_good.changed:
1624 1620 WORLD
1625 1621
1626 1622 mod_in_r2.changed:
1627 1623 hello
1628 1624 WORLD
1629 1625
1630 1626 mod_in_r2.ignored:
1631 1627 hello
1632 1628 world
1633 1629
1634 1630 mod_in_r2.whole:
1635 1631 HELLO
1636 1632 WORLD
1637 1633
1638 1634 mod_in_r2_already_good.changed:
1639 1635 WORLD
1640 1636
1641 1637 new_in_child.changed:
1642 1638 NEW IN CHILD
1643 1639
1644 1640 new_in_child.ignored:
1645 1641 new in child
1646 1642
1647 1643 new_in_child.whole:
1648 1644 NEW IN CHILD
1649 1645
1650 1646 new_in_child_already_good.changed:
1651 1647 ALREADY GOOD, NEW IN THIS REV
1652 1648
1653 1649 new_in_merge.changed:
1654 1650 NEW IN MERGE
1655 1651
1656 1652 new_in_merge.ignored:
1657 1653 new in merge
1658 1654
1659 1655 new_in_merge.whole:
1660 1656 NEW IN MERGE
1661 1657
1662 1658 new_in_merge_already_good.changed:
1663 1659 ALREADY GOOD, NEW IN THIS REV
1664 1660
1665 1661 new_in_r1.changed:
1666 1662 NEW IN R1
1667 1663
1668 1664 new_in_r1.ignored:
1669 1665 new in r1
1670 1666
1671 1667 new_in_r1.whole:
1672 1668 NEW IN R1
1673 1669
1674 1670 new_in_r1_already_good.changed:
1675 1671 ALREADY GOOD, NEW IN THIS REV
1676 1672
1677 1673 new_in_r2.changed:
1678 1674 NEW IN R2
1679 1675
1680 1676 new_in_r2.ignored:
1681 1677 new in r2
1682 1678
1683 1679 new_in_r2.whole:
1684 1680 NEW IN R2
1685 1681
1686 1682 new_in_r2_already_good.changed:
1687 1683 ALREADY GOOD, NEW IN THIS REV
1688 1684
1689 1685 r0.changed:
1690 1686 hello
1691 1687
1692 1688 r0.ignored:
1693 1689 hello
1694 1690
1695 1691 r0.whole:
1696 1692 hello
1697 1693