match: implement __repr__() and update users (API)...
Martin von Zweigbergk
r32406:95201747 default
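This changeset adds a `__repr__()` implementation to Mercurial's matcher classes and updates two callers to rely on it: fsmonitor's `_hashignore()` now hashes `repr(ignore)` instead of poking at individual matcher attributes, and `debugignore` prints `repr(ignore)` instead of reaching for `includepat`. As a rough illustration of the new API (a sketch; the exact repr format is an implementation detail and varies by Mercurial version):

    from mercurial import match as matchmod

    # signature of this era of Mercurial: match(root, cwd, patterns, ...)
    m = matchmod.match('/repo', '', ['glob:*.py'])
    # hypothetical output shape, e.g.:
    #   <matcher files=[], patterns='(?:[^/]*\.py$)', includes=None, excludes=None>
    print(repr(m))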
@@ -1,741 +1,729 b''
1 1 # __init__.py - fsmonitor initialization and overrides
2 2 #
3 3 # Copyright 2013-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9 9
10 10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 11 status results.
12 12
13 13 On a particular Linux system, for a real-world repository with over 400,000
14 14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 15 system, with fsmonitor it takes about 0.3 seconds.
16 16
17 17 fsmonitor requires no configuration -- it will tell Watchman about your
18 18 repository as necessary. You'll need to install Watchman from
19 19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20 20
21 21 The following configuration options exist:
22 22
23 23 ::
24 24
25 25 [fsmonitor]
26 26 mode = {off, on, paranoid}
27 27
28 28 When `mode = off`, fsmonitor will disable itself (similar to not loading the
29 29 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
30 30 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
31 31 and ensure that the results are consistent.
32 32
33 33 ::
34 34
35 35 [fsmonitor]
36 36 timeout = (float)
37 37
38 38 A value, in seconds, that determines how long fsmonitor will wait for Watchman
39 39 to return results. Defaults to `2.0`.
40 40
41 41 ::
42 42
43 43 [fsmonitor]
44 44 blacklistusers = (list of userids)
45 45
46 46 A list of usernames for which fsmonitor will disable itself altogether.
47 47
48 48 ::
49 49
50 50 [fsmonitor]
51 51 walk_on_invalidate = (boolean)
52 52
53 53 Whether or not to walk the whole repo ourselves when our cached state has been
54 54 invalidated, for example when Watchman has been restarted or .hgignore rules
55 55 have been changed. Walking the repo in that case can result in competing for
56 56 I/O with Watchman. For large repos it is recommended to set this value to
57 57 false. You may wish to set this to true if you have a very fast filesystem
58 58 that can outpace the IPC overhead of getting the result data for the full repo
59 59 from Watchman. Defaults to false.
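For example, a conservative setup for a large repository might combine the
options above as follows (values are illustrative, not recommendations)::

    [fsmonitor]
    mode = on
    timeout = 10.0
    walk_on_invalidate = false
    blacklistusers = buildbot, releng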
60 60
61 61 fsmonitor is incompatible with the largefiles and eol extensions, and
62 62 will disable itself if any of those are active.
63 63
64 64 '''
65 65
66 66 # Platforms Supported
67 67 # ===================
68 68 #
69 69 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
70 70 # even under severe loads.
71 71 #
72 72 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
73 73 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
74 74 # user testing under normal loads.
75 75 #
76 76 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
77 77 # very little testing has been done.
78 78 #
79 79 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
80 80 #
81 81 # Known Issues
82 82 # ============
83 83 #
84 84 # * fsmonitor will disable itself if any of the following extensions are
85 85 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
86 86 # * fsmonitor will produce incorrect results if nested repos that are not
87 87 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
88 88 #
89 89 # The issues related to nested repos and subrepos are probably not fundamental
90 90 # ones. Patches to fix them are welcome.
91 91
92 92 from __future__ import absolute_import
93 93
94 94 import codecs
95 95 import hashlib
96 96 import os
97 97 import stat
98 98 import sys
99 99
100 100 from mercurial.i18n import _
101 101 from mercurial import (
102 102 context,
103 103 encoding,
104 104 error,
105 105 extensions,
106 106 localrepo,
107 107 merge,
108 108 pathutil,
109 109 pycompat,
110 110 scmutil,
111 111 util,
112 112 )
113 113 from mercurial import match as matchmod
114 114
115 115 from . import (
116 116 pywatchman,
117 117 state,
118 118 watchmanclient,
119 119 )
120 120
121 121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 123 # be specifying the version(s) of Mercurial they are tested with, or
124 124 # leave the attribute unspecified.
125 125 testedwith = 'ships-with-hg-core'
126 126
127 127 # This extension is incompatible with the following blacklisted extensions
128 128 # and will disable itself when encountering one of these:
129 129 _blacklist = ['largefiles', 'eol']
130 130
131 131 def _handleunavailable(ui, state, ex):
132 132 """Exception handler for Watchman interaction exceptions"""
133 133 if isinstance(ex, watchmanclient.Unavailable):
134 134 if ex.warn:
135 135 ui.warn(str(ex) + '\n')
136 136 if ex.invalidate:
137 137 state.invalidate()
138 138 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
139 139 else:
140 140 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
141 141
142 142 def _hashignore(ignore):
143 143 """Calculate hash for ignore patterns and filenames
144 144
145 145 If this information changes between Mercurial invocations, we can't
146 146 rely on Watchman information anymore and have to re-scan the working
147 147 copy.
148 148
149 149 """
150 150 sha1 = hashlib.sha1()
151 if util.safehasattr(ignore, 'includepat'):
152 sha1.update(ignore.includepat)
153 sha1.update('\0\0')
154 if util.safehasattr(ignore, 'excludepat'):
155 sha1.update(ignore.excludepat)
156 sha1.update('\0\0')
157 if util.safehasattr(ignore, 'patternspat'):
158 sha1.update(ignore.patternspat)
159 sha1.update('\0\0')
160 if util.safehasattr(ignore, '_files'):
161 for f in ignore._files:
162 sha1.update(f)
163 sha1.update('\0')
151 sha1.update(repr(ignore))
164 152 return sha1.hexdigest()
165 153
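The simplification above works because matchers now implement `__repr__()` in terms of their patterns, so any change to the ignore rules changes the digest. A self-contained sketch of the idea (`FakeMatcher` is a stand-in, not a real `mercurial.match` class)::

    import hashlib

    class FakeMatcher(object):
        def __init__(self, patternspat):
            self.patternspat = patternspat
        def __repr__(self):
            return '<matcher patterns=%r>' % self.patternspat

    def hashmatcher(m):
        # mirrors _hashignore() above: hash the repr, not individual attributes
        return hashlib.sha1(repr(m).encode('utf-8')).hexdigest()

    assert hashmatcher(FakeMatcher('a')) != hashmatcher(FakeMatcher('b'))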
166 154 _watchmanencoding = pywatchman.encoding.get_local_encoding()
167 155 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
168 156 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
169 157
170 158 def _watchmantofsencoding(path):
171 159 """Fix path to match watchman and local filesystem encoding
172 160
173 161 watchman's path encoding can differ from the filesystem encoding; on
174 162 Windows, for example, it's always UTF-8.
175 163 """
176 164 try:
177 165 decoded = path.decode(_watchmanencoding)
178 166 except UnicodeDecodeError as e:
179 167 raise error.Abort(str(e), hint='watchman encoding error')
180 168
181 169 try:
182 170 encoded = decoded.encode(_fsencoding, 'strict')
183 171 except UnicodeEncodeError as e:
184 172 raise error.Abort(str(e))
185 173
186 174 return encoded
187 175
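The guarded decode/encode round-trip above is the whole conversion; a sketch of what it does, with hypothetical encodings (watchman reporting UTF-8, a latin-1 filesystem)::

    raw = 'caf\xc3\xa9.txt'              # UTF-8 bytes from watchman
    decoded = raw.decode('utf-8')        # u'caf\xe9.txt'
    encoded = decoded.encode('latin-1')  # 'caf\xe9.txt' as latin-1 bytes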
188 176 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
189 177 '''Replacement for dirstate.walk, hooking into Watchman.
190 178
191 179 Whenever full is False, ignored is False, and the Watchman client is
192 180 available, use Watchman combined with saved state to possibly return only a
193 181 subset of files.'''
194 182 def bail():
195 183 return orig(match, subrepos, unknown, ignored, full=True)
196 184
197 185 if full or ignored or not self._watchmanclient.available():
198 186 return bail()
199 187 state = self._fsmonitorstate
200 188 clock, ignorehash, notefiles = state.get()
201 189 if not clock:
202 190 if state.walk_on_invalidate:
203 191 return bail()
204 192 # Initial NULL clock value, see
205 193 # https://facebook.github.io/watchman/docs/clockspec.html
206 194 clock = 'c:0:0'
207 195 notefiles = []
208 196
209 197 def fwarn(f, msg):
210 198 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
211 199 return False
212 200
213 201 def badtype(mode):
214 202 kind = _('unknown')
215 203 if stat.S_ISCHR(mode):
216 204 kind = _('character device')
217 205 elif stat.S_ISBLK(mode):
218 206 kind = _('block device')
219 207 elif stat.S_ISFIFO(mode):
220 208 kind = _('fifo')
221 209 elif stat.S_ISSOCK(mode):
222 210 kind = _('socket')
223 211 elif stat.S_ISDIR(mode):
224 212 kind = _('directory')
225 213 return _('unsupported file type (type is %s)') % kind
226 214
227 215 ignore = self._ignore
228 216 dirignore = self._dirignore
229 217 if unknown:
230 218 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
231 219 # ignore list changed -- can't rely on Watchman state any more
232 220 if state.walk_on_invalidate:
233 221 return bail()
234 222 notefiles = []
235 223 clock = 'c:0:0'
236 224 else:
237 225 # always ignore
238 226 ignore = util.always
239 227 dirignore = util.always
240 228
241 229 matchfn = match.matchfn
242 230 matchalways = match.always()
243 231 dmap = self._map
244 232 nonnormalset = getattr(self, '_nonnormalset', None)
245 233
246 234 copymap = self._copymap
247 235 getkind = stat.S_IFMT
248 236 dirkind = stat.S_IFDIR
249 237 regkind = stat.S_IFREG
250 238 lnkkind = stat.S_IFLNK
251 239 join = self._join
252 240 normcase = util.normcase
253 241 fresh_instance = False
254 242
255 243 exact = skipstep3 = False
256 244 if match.isexact(): # match.exact
257 245 exact = True
258 246 dirignore = util.always # skip step 2
259 247 elif match.prefix(): # match.match, no patterns
260 248 skipstep3 = True
261 249
262 250 if not exact and self._checkcase:
263 251 # note that even though we could receive directory entries, we're only
264 252 # interested in checking if a file with the same name exists. So only
265 253 # normalize files if possible.
266 254 normalize = self._normalizefile
267 255 skipstep3 = False
268 256 else:
269 257 normalize = None
270 258
271 259 # step 1: find all explicit files
272 260 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
273 261
274 262 skipstep3 = skipstep3 and not (work or dirsnotfound)
275 263 work = [d for d in work if not dirignore(d[0])]
276 264
277 265 if not work and (exact or skipstep3):
278 266 for s in subrepos:
279 267 del results[s]
280 268 del results['.hg']
281 269 return results
282 270
283 271 # step 2: query Watchman
284 272 try:
285 273 # Use the user-configured timeout for the query.
286 274 # Add a little slack on top of the user-configured value to allow
287 275 # for overhead while transferring the data
288 276 self._watchmanclient.settimeout(state.timeout + 0.1)
289 277 result = self._watchmanclient.command('query', {
290 278 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
291 279 'since': clock,
292 280 'expression': [
293 281 'not', [
294 282 'anyof', ['dirname', '.hg'],
295 283 ['name', '.hg', 'wholename']
296 284 ]
297 285 ],
298 286 'sync_timeout': int(state.timeout * 1000),
299 287 'empty_on_fresh_instance': state.walk_on_invalidate,
300 288 })
301 289 except Exception as ex:
302 290 _handleunavailable(self._ui, state, ex)
303 291 self._watchmanclient.clearconnection()
304 292 return bail()
305 293 else:
306 294 # We need to propagate the last observed clock up so that we
307 295 # can use it for our next query
308 296 state.setlastclock(result['clock'])
309 297 if result['is_fresh_instance']:
310 298 if state.walk_on_invalidate:
311 299 state.invalidate()
312 300 return bail()
313 301 fresh_instance = True
314 302 # Ignore any prior notable files from the state info
315 303 notefiles = []
316 304
317 305 # for file paths which require normalization and we encounter a case
318 306 # collision, we store our own foldmap
319 307 if normalize:
320 308 foldmap = dict((normcase(k), k) for k in results)
321 309
322 310 switch_slashes = pycompat.ossep == '\\'
323 311 # The order of the results is, strictly speaking, undefined.
324 312 # For case changes on a case insensitive filesystem we may receive
325 313 # two entries, one with exists=True and another with exists=False.
326 314 # The exists=True entries in the same response should be interpreted
327 315 # as being happens-after the exists=False entries due to the way that
328 316 # Watchman tracks files. We use this property to reconcile deletes
329 317 # for name case changes.
330 318 for entry in result['files']:
331 319 fname = entry['name']
332 320 if _fixencoding:
333 321 fname = _watchmantofsencoding(fname)
334 322 if switch_slashes:
335 323 fname = fname.replace('\\', '/')
336 324 if normalize:
337 325 normed = normcase(fname)
338 326 fname = normalize(fname, True, True)
339 327 foldmap[normed] = fname
340 328 fmode = entry['mode']
341 329 fexists = entry['exists']
342 330 kind = getkind(fmode)
343 331
344 332 if not fexists:
345 333 # if marked as deleted and we don't already have a change
346 334 # record, mark it as deleted. If we already have an entry
347 335 # for fname then it was either part of walkexplicit or was
348 336 # an earlier result that was a case change
349 337 if fname not in results and fname in dmap and (
350 338 matchalways or matchfn(fname)):
351 339 results[fname] = None
352 340 elif kind == dirkind:
353 341 if fname in dmap and (matchalways or matchfn(fname)):
354 342 results[fname] = None
355 343 elif kind == regkind or kind == lnkkind:
356 344 if fname in dmap:
357 345 if matchalways or matchfn(fname):
358 346 results[fname] = entry
359 347 elif (matchalways or matchfn(fname)) and not ignore(fname):
360 348 results[fname] = entry
361 349 elif fname in dmap and (matchalways or matchfn(fname)):
362 350 results[fname] = None
363 351
364 352 # step 3: query notable files we don't already know about
365 353 # XXX try not to iterate over the entire dmap
366 354 if normalize:
367 355 # any notable files that have changed case will already be handled
368 356 # above, so just check membership in the foldmap
369 357 notefiles = set((normalize(f, True, True) for f in notefiles
370 358 if normcase(f) not in foldmap))
371 359 visit = set((f for f in notefiles if (f not in results and matchfn(f)
372 360 and (f in dmap or not ignore(f)))))
373 361
374 362 if nonnormalset is not None and not fresh_instance:
375 363 if matchalways:
376 364 visit.update(f for f in nonnormalset if f not in results)
377 365 visit.update(f for f in copymap if f not in results)
378 366 else:
379 367 visit.update(f for f in nonnormalset
380 368 if f not in results and matchfn(f))
381 369 visit.update(f for f in copymap
382 370 if f not in results and matchfn(f))
383 371 else:
384 372 if matchalways:
385 373 visit.update(f for f, st in dmap.iteritems()
386 374 if (f not in results and
387 375 (st[2] < 0 or st[0] != 'n' or fresh_instance)))
388 376 visit.update(f for f in copymap if f not in results)
389 377 else:
390 378 visit.update(f for f, st in dmap.iteritems()
391 379 if (f not in results and
392 380 (st[2] < 0 or st[0] != 'n' or fresh_instance)
393 381 and matchfn(f)))
394 382 visit.update(f for f in copymap
395 383 if f not in results and matchfn(f))
396 384
397 385 audit = pathutil.pathauditor(self._root).check
398 386 auditpass = [f for f in visit if audit(f)]
399 387 auditpass.sort()
400 388 auditfail = visit.difference(auditpass)
401 389 for f in auditfail:
402 390 results[f] = None
403 391
404 392 nf = iter(auditpass).next
405 393 for st in util.statfiles([join(f) for f in auditpass]):
406 394 f = nf()
407 395 if st or f in dmap:
408 396 results[f] = st
409 397
410 398 for s in subrepos:
411 399 del results[s]
412 400 del results['.hg']
413 401 return results
414 402
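For reference, the step-2 query above can be replayed against a live watchman instance with the bundled pywatchman client. A sketch, assuming watchman is already watching /path/to/repo::

    import pywatchman

    client = pywatchman.client(timeout=2.0)
    result = client.query('query', '/path/to/repo', {
        'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
        'since': 'c:0:0',  # the initial NULL clock: report everything
        'expression': ['not', ['anyof', ['dirname', '.hg'],
                               ['name', '.hg', 'wholename']]],
    })
    clock = result['clock']    # propagate into the next query's 'since'
    changed = result['files']  # one dict per file, with the fields above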
415 403 def overridestatus(
416 404 orig, self, node1='.', node2=None, match=None, ignored=False,
417 405 clean=False, unknown=False, listsubrepos=False):
418 406 listignored = ignored
419 407 listclean = clean
420 408 listunknown = unknown
421 409
422 410 def _cmpsets(l1, l2):
423 411 try:
424 412 if 'FSMONITOR_LOG_FILE' in encoding.environ:
425 413 fn = encoding.environ['FSMONITOR_LOG_FILE']
426 414 f = open(fn, 'wb')
427 415 else:
428 416 fn = 'fsmonitorfail.log'
429 417 f = self.opener(fn, 'wb')
430 418 except (IOError, OSError):
431 419 self.ui.warn(_('warning: unable to write to %s\n') % fn)
432 420 return
433 421
434 422 try:
435 423 for i, (s1, s2) in enumerate(zip(l1, l2)):
436 424 if set(s1) != set(s2):
437 425 f.write('sets at position %d are unequal\n' % i)
438 426 f.write('watchman returned: %s\n' % s1)
439 427 f.write('stat returned: %s\n' % s2)
440 428 finally:
441 429 f.close()
442 430
443 431 if isinstance(node1, context.changectx):
444 432 ctx1 = node1
445 433 else:
446 434 ctx1 = self[node1]
447 435 if isinstance(node2, context.changectx):
448 436 ctx2 = node2
449 437 else:
450 438 ctx2 = self[node2]
451 439
452 440 working = ctx2.rev() is None
453 441 parentworking = working and ctx1 == self['.']
454 442 match = match or matchmod.always(self.root, self.getcwd())
455 443
456 444 # Maybe we can use this opportunity to update Watchman's state.
457 445 # Mercurial uses workingcommitctx and/or memctx to represent the part of
458 446 # the workingctx that is to be committed. So don't update the state in
459 447 # that case.
460 448 # HG_PENDING is set in the environment when the dirstate is being updated
461 449 # in the middle of a transaction; we must not update our state in that
462 450 # case, or we risk forgetting about changes in the working copy.
463 451 updatestate = (parentworking and match.always() and
464 452 not isinstance(ctx2, (context.workingcommitctx,
465 453 context.memctx)) and
466 454 'HG_PENDING' not in encoding.environ)
467 455
468 456 try:
469 457 if self._fsmonitorstate.walk_on_invalidate:
470 458 # Use a short timeout to query the current clock. If that
471 459 # takes too long then we assume that the service will be slow
472 460 # to answer our query.
473 461 # walk_on_invalidate indicates that we prefer to walk the
474 462 # tree ourselves because we can ignore portions that Watchman
475 463 # cannot and we tend to be faster in the warmer buffer cache
476 464 # cases.
477 465 self._watchmanclient.settimeout(0.1)
478 466 else:
479 467 # Give Watchman more time to potentially complete its walk
480 468 # and return the initial clock. In this mode we assume that
481 469 # the filesystem will be slower than parsing a potentially
482 470 # very large Watchman result set.
483 471 self._watchmanclient.settimeout(
484 472 self._fsmonitorstate.timeout + 0.1)
485 473 startclock = self._watchmanclient.getcurrentclock()
486 474 except Exception as ex:
487 475 self._watchmanclient.clearconnection()
488 476 _handleunavailable(self.ui, self._fsmonitorstate, ex)
489 477 # boo, Watchman failed. bail
490 478 return orig(node1, node2, match, listignored, listclean,
491 479 listunknown, listsubrepos)
492 480
493 481 if updatestate:
494 482 # We need info about unknown files. This may make things slower the
495 483 # first time, but whatever.
496 484 stateunknown = True
497 485 else:
498 486 stateunknown = listunknown
499 487
500 488 r = orig(node1, node2, match, listignored, listclean, stateunknown,
501 489 listsubrepos)
502 490 modified, added, removed, deleted, unknown, ignored, clean = r
503 491
504 492 if updatestate:
505 493 notefiles = modified + added + removed + deleted + unknown
506 494 self._fsmonitorstate.set(
507 495 self._fsmonitorstate.getlastclock() or startclock,
508 496 _hashignore(self.dirstate._ignore),
509 497 notefiles)
510 498
511 499 if not listunknown:
512 500 unknown = []
513 501
514 502 # don't do paranoid checks if we're not going to query Watchman anyway
515 503 full = listclean or match.traversedir is not None
516 504 if self._fsmonitorstate.mode == 'paranoid' and not full:
517 505 # run status again and fall back to the old walk this time
518 506 self.dirstate._fsmonitordisable = True
519 507
520 508 # shut the UI up
521 509 quiet = self.ui.quiet
522 510 self.ui.quiet = True
523 511 fout, ferr = self.ui.fout, self.ui.ferr
524 512 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
525 513
526 514 try:
527 515 rv2 = orig(
528 516 node1, node2, match, listignored, listclean, listunknown,
529 517 listsubrepos)
530 518 finally:
531 519 self.dirstate._fsmonitordisable = False
532 520 self.ui.quiet = quiet
533 521 self.ui.fout, self.ui.ferr = fout, ferr
534 522
535 523 # clean isn't tested since it's set to True above
536 524 _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
537 525 rv2)
538 526 modified, added, removed, deleted, unknown, ignored, clean = rv2
539 527
540 528 return scmutil.status(
541 529 modified, added, removed, deleted, unknown, ignored, clean)
542 530
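Paranoid mode, together with the FSMONITOR_LOG_FILE environment variable read by _cmpsets() above, gives a way to audit watchman's answers against a full walk, e.g.::

    FSMONITOR_LOG_FILE=/tmp/fsmonitor-fail.log \
        hg status --config fsmonitor.mode=paranoid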
543 531 def makedirstate(cls):
544 532 class fsmonitordirstate(cls):
545 533 def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
546 534 # _fsmonitordisable is used in paranoid mode
547 535 self._fsmonitordisable = False
548 536 self._fsmonitorstate = fsmonitorstate
549 537 self._watchmanclient = watchmanclient
550 538
551 539 def walk(self, *args, **kwargs):
552 540 orig = super(fsmonitordirstate, self).walk
553 541 if self._fsmonitordisable:
554 542 return orig(*args, **kwargs)
555 543 return overridewalk(orig, self, *args, **kwargs)
556 544
557 545 def rebuild(self, *args, **kwargs):
558 546 self._fsmonitorstate.invalidate()
559 547 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
560 548
561 549 def invalidate(self, *args, **kwargs):
562 550 self._fsmonitorstate.invalidate()
563 551 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
564 552
565 553 return fsmonitordirstate
566 554
567 555 def wrapdirstate(orig, self):
568 556 ds = orig(self)
569 557 # only override the dirstate when Watchman is available for the repo
570 558 if util.safehasattr(self, '_fsmonitorstate'):
571 559 ds.__class__ = makedirstate(ds.__class__)
572 560 ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
573 561 return ds
574 562
575 563 def extsetup(ui):
576 564 wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate)
577 565 if pycompat.sysplatform == 'darwin':
578 566 # An assist for avoiding the dangling-symlink fsevents bug
579 567 extensions.wrapfunction(os, 'symlink', wrapsymlink)
580 568
581 569 extensions.wrapfunction(merge, 'update', wrapupdate)
582 570
583 571 def wrapsymlink(orig, source, link_name):
584 572 ''' if we create a dangling symlink, also touch the parent dir
585 573 to encourage fsevents notifications to work more correctly '''
586 574 try:
587 575 return orig(source, link_name)
588 576 finally:
589 577 try:
590 578 os.utime(os.path.dirname(link_name), None)
591 579 except OSError:
592 580 pass
593 581
594 582 class state_update(object):
595 583 ''' This context manager is responsible for dispatching the state-enter
596 584 and state-leave signals to the watchman service '''
597 585
598 586 def __init__(self, repo, node, distance, partial):
599 587 self.repo = repo
600 588 self.node = node
601 589 self.distance = distance
602 590 self.partial = partial
603 591 self._lock = None
604 592 self.need_leave = False
605 593
606 594 def __enter__(self):
607 595 # We explicitly need to take a lock here, before we proceed to update
608 596 # watchman about the update operation, so that we don't race with
609 597 # some other actor. merge.update is going to take the wlock almost
610 598 # immediately anyway, so this is effectively extending the lock
611 599 # around a couple of short sanity checks.
612 600 self._lock = self.repo.wlock()
613 601 self.need_leave = self._state('state-enter')
614 602 return self
615 603
616 604 def __exit__(self, type_, value, tb):
617 605 try:
618 606 if self.need_leave:
619 607 status = 'ok' if type_ is None else 'failed'
620 608 self._state('state-leave', status=status)
621 609 finally:
622 610 if self._lock:
623 611 self._lock.release()
624 612
625 613 def _state(self, cmd, status='ok'):
626 614 if not util.safehasattr(self.repo, '_watchmanclient'):
627 615 return False
628 616 try:
629 617 commithash = self.repo[self.node].hex()
630 618 self.repo._watchmanclient.command(cmd, {
631 619 'name': 'hg.update',
632 620 'metadata': {
633 621 # the target revision
634 622 'rev': commithash,
635 623 # approximate number of commits between current and target
636 624 'distance': self.distance,
637 625 # success/failure (only really meaningful for state-leave)
638 626 'status': status,
639 627 # whether the working copy parent is changing
640 628 'partial': self.partial,
641 629 }})
642 630 return True
643 631 except Exception as e:
644 632 # Swallow any errors; fire and forget
645 633 self.repo.ui.log(
646 634 'watchman', 'Exception %s while running %s\n', e, cmd)
647 635 return False
648 636
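On the receiving end of these signals, a watchman subscriber can ask for notifications to be deferred while the hg.update state is asserted (see the settling documentation linked below). A sketch using pywatchman::

    import pywatchman

    client = pywatchman.client()
    client.query('subscribe', '/path/to/repo', 'mysub', {
        'fields': ['name'],
        # hold notifications while an 'hg.update' state (entered/left by
        # state_update above) is in progress
        'defer': ['hg.update'],
    })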
649 637 # Bracket working copy updates with calls to the watchman state-enter
650 638 # and state-leave commands. This allows clients to perform more intelligent
651 639 # settling during bulk file change scenarios
652 640 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
653 641 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
654 642 mergeancestor=False, labels=None, matcher=None, **kwargs):
655 643
656 644 distance = 0
657 645 partial = True
658 646 if matcher is None or matcher.always():
659 647 partial = False
660 648 wc = repo[None]
661 649 parents = wc.parents()
662 650 if len(parents) == 2:
663 651 anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
664 652 ancrev = repo[anc].rev()
665 653 distance = abs(repo[node].rev() - ancrev)
666 654 elif len(parents) == 1:
667 655 distance = abs(repo[node].rev() - parents[0].rev())
668 656
669 657 with state_update(repo, node, distance, partial):
670 658 return orig(
671 659 repo, node, branchmerge, force, ancestor, mergeancestor,
672 660 labels, matcher, **kwargs)
673 661
674 662 def reposetup(ui, repo):
675 663 # We don't work with largefiles or eol
676 664 exts = extensions.enabled()
677 665 for ext in _blacklist:
678 666 if ext in exts:
679 667 ui.warn(_('The fsmonitor extension is incompatible with the %s '
680 668 'extension and has been disabled.\n') % ext)
681 669 return
682 670
683 671 if util.safehasattr(repo, 'dirstate'):
684 672 # We don't work with subrepos either. Note that we can get passed in
685 673 # e.g. a statichttprepo, which throws on trying to access the substate.
686 674 # XXX This sucks.
687 675 try:
688 676 # repo[None].substate can cause a dirstate parse, which is too
689 677 # slow. Instead, look for a file called hgsubstate.
690 678 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
691 679 return
692 680 except AttributeError:
693 681 return
694 682
695 683 fsmonitorstate = state.state(repo)
696 684 if fsmonitorstate.mode == 'off':
697 685 return
698 686
699 687 try:
700 688 client = watchmanclient.client(repo)
701 689 except Exception as ex:
702 690 _handleunavailable(ui, fsmonitorstate, ex)
703 691 return
704 692
705 693 repo._fsmonitorstate = fsmonitorstate
706 694 repo._watchmanclient = client
707 695
708 696 # at this point, since _fsmonitorstate wasn't present, repo.dirstate is
709 697 # not an fsmonitordirstate
710 698 dirstate = repo.dirstate
711 699 dirstate.__class__ = makedirstate(dirstate.__class__)
712 700 dirstate._fsmonitorinit(fsmonitorstate, client)
713 701 # invalidate property cache, but keep filecache which contains the
714 702 # wrapped dirstate object
715 703 del repo.unfiltered().__dict__['dirstate']
716 704 assert dirstate is repo._filecache['dirstate'].obj
717 705
718 706 class fsmonitorrepo(repo.__class__):
719 707 def status(self, *args, **kwargs):
720 708 orig = super(fsmonitorrepo, self).status
721 709 return overridestatus(orig, self, *args, **kwargs)
722 710
723 711 repo.__class__ = fsmonitorrepo
724 712
725 713 def wrapfilecache(cls, propname, wrapper):
726 714 """Wraps a filecache property. These can't be wrapped using the normal
727 715 wrapfunction. This should eventually go into upstream Mercurial.
728 716 """
729 717 assert callable(wrapper)
730 718 for currcls in cls.__mro__:
731 719 if propname in currcls.__dict__:
732 720 origfn = currcls.__dict__[propname].func
733 721 assert callable(origfn)
734 722 def wrap(*args, **kwargs):
735 723 return wrapper(origfn, *args, **kwargs)
736 724 currcls.__dict__[propname].func = wrap
737 725 break
738 726
739 727 if currcls is object:
740 728 raise AttributeError(
741 729 _("type '%s' has no property '%s'") % (cls, propname))
@@ -1,2165 +1,2161 b''
1 1 # debugcommands.py - command processing for debug* commands
2 2 #
3 3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import difflib
11 11 import errno
12 12 import operator
13 13 import os
14 14 import random
15 15 import socket
16 16 import string
17 17 import sys
18 18 import tempfile
19 19 import time
20 20
21 21 from .i18n import _
22 22 from .node import (
23 23 bin,
24 24 hex,
25 25 nullhex,
26 26 nullid,
27 27 nullrev,
28 28 short,
29 29 )
30 30 from . import (
31 31 bundle2,
32 32 changegroup,
33 33 cmdutil,
34 34 color,
35 35 context,
36 36 dagparser,
37 37 dagutil,
38 38 encoding,
39 39 error,
40 40 exchange,
41 41 extensions,
42 42 filemerge,
43 43 fileset,
44 44 formatter,
45 45 hg,
46 46 localrepo,
47 47 lock as lockmod,
48 48 merge as mergemod,
49 49 obsolete,
50 50 policy,
51 51 pvec,
52 52 pycompat,
53 53 registrar,
54 54 repair,
55 55 revlog,
56 56 revset,
57 57 revsetlang,
58 58 scmutil,
59 59 setdiscovery,
60 60 simplemerge,
61 61 smartset,
62 62 sslutil,
63 63 streamclone,
64 64 templater,
65 65 treediscovery,
66 66 upgrade,
67 67 util,
68 68 vfs as vfsmod,
69 69 )
70 70
71 71 release = lockmod.release
72 72
73 73 command = registrar.command()
74 74
75 75 @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
76 76 def debugancestor(ui, repo, *args):
77 77 """find the ancestor revision of two revisions in a given index"""
78 78 if len(args) == 3:
79 79 index, rev1, rev2 = args
80 80 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False), index)
81 81 lookup = r.lookup
82 82 elif len(args) == 2:
83 83 if not repo:
84 84 raise error.Abort(_('there is no Mercurial repository here '
85 85 '(.hg not found)'))
86 86 rev1, rev2 = args
87 87 r = repo.changelog
88 88 lookup = repo.lookup
89 89 else:
90 90 raise error.Abort(_('either two or three arguments required'))
91 91 a = r.ancestor(lookup(rev1), lookup(rev2))
92 92 ui.write('%d:%s\n' % (r.rev(a), hex(a)))
93 93
94 94 @command('debugapplystreamclonebundle', [], 'FILE')
95 95 def debugapplystreamclonebundle(ui, repo, fname):
96 96 """apply a stream clone bundle file"""
97 97 f = hg.openpath(ui, fname)
98 98 gen = exchange.readbundle(ui, f, fname)
99 99 gen.apply(repo)
100 100
101 101 @command('debugbuilddag',
102 102 [('m', 'mergeable-file', None, _('add single file mergeable changes')),
103 103 ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
104 104 ('n', 'new-file', None, _('add new file at each rev'))],
105 105 _('[OPTION]... [TEXT]'))
106 106 def debugbuilddag(ui, repo, text=None,
107 107 mergeable_file=False,
108 108 overwritten_file=False,
109 109 new_file=False):
110 110 """builds a repo with a given DAG from scratch in the current empty repo
111 111
112 112 The description of the DAG is read from stdin if not given on the
113 113 command line.
114 114
115 115 Elements:
116 116
117 117 - "+n" is a linear run of n nodes based on the current default parent
118 118 - "." is a single node based on the current default parent
119 119 - "$" resets the default parent to null (implied at the start);
120 120 otherwise the default parent is always the last node created
121 121 - "<p" sets the default parent to the backref p
122 122 - "*p" is a fork at parent p, which is a backref
123 123 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
124 124 - "/p2" is a merge of the preceding node and p2
125 125 - ":tag" defines a local tag for the preceding node
126 126 - "@branch" sets the named branch for subsequent nodes
127 127 - "#...\\n" is a comment up to the end of the line
128 128
129 129 Whitespace between the above elements is ignored.
130 130
131 131 A backref is either
132 132
133 133 - a number n, which references the node curr-n, where curr is the current
134 134 node, or
135 135 - the name of a local tag you placed earlier using ":tag", or
136 136 - empty to denote the default parent.
137 137
138 138 All string-valued elements are either strictly alphanumeric, or must
139 139 be enclosed in double quotes ("..."), with "\\" as escape character.
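For instance, the following text (an illustrative DAG, not from the original
documentation) builds three linear changesets, tags the third one "base",
starts a fresh two-changeset branch from the null revision, and then merges
it with "base"::

    +3 :base $ +2 /base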
140 140 """
141 141
142 142 if text is None:
143 143 ui.status(_("reading DAG from stdin\n"))
144 144 text = ui.fin.read()
145 145
146 146 cl = repo.changelog
147 147 if len(cl) > 0:
148 148 raise error.Abort(_('repository is not empty'))
149 149
150 150 # determine number of revs in DAG
151 151 total = 0
152 152 for type, data in dagparser.parsedag(text):
153 153 if type == 'n':
154 154 total += 1
155 155
156 156 if mergeable_file:
157 157 linesperrev = 2
158 158 # make a file with k lines per rev
159 159 initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
160 160 initialmergedlines.append("")
161 161
162 162 tags = []
163 163
164 164 wlock = lock = tr = None
165 165 try:
166 166 wlock = repo.wlock()
167 167 lock = repo.lock()
168 168 tr = repo.transaction("builddag")
169 169
170 170 at = -1
171 171 atbranch = 'default'
172 172 nodeids = []
173 173 id = 0
174 174 ui.progress(_('building'), id, unit=_('revisions'), total=total)
175 175 for type, data in dagparser.parsedag(text):
176 176 if type == 'n':
177 177 ui.note(('node %s\n' % str(data)))
178 178 id, ps = data
179 179
180 180 files = []
181 181 fctxs = {}
182 182
183 183 p2 = None
184 184 if mergeable_file:
185 185 fn = "mf"
186 186 p1 = repo[ps[0]]
187 187 if len(ps) > 1:
188 188 p2 = repo[ps[1]]
189 189 pa = p1.ancestor(p2)
190 190 base, local, other = [x[fn].data() for x in (pa, p1,
191 191 p2)]
192 192 m3 = simplemerge.Merge3Text(base, local, other)
193 193 ml = [l.strip() for l in m3.merge_lines()]
194 194 ml.append("")
195 195 elif at > 0:
196 196 ml = p1[fn].data().split("\n")
197 197 else:
198 198 ml = initialmergedlines
199 199 ml[id * linesperrev] += " r%i" % id
200 200 mergedtext = "\n".join(ml)
201 201 files.append(fn)
202 202 fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
203 203
204 204 if overwritten_file:
205 205 fn = "of"
206 206 files.append(fn)
207 207 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
208 208
209 209 if new_file:
210 210 fn = "nf%i" % id
211 211 files.append(fn)
212 212 fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
213 213 if len(ps) > 1:
214 214 if not p2:
215 215 p2 = repo[ps[1]]
216 216 for fn in p2:
217 217 if fn.startswith("nf"):
218 218 files.append(fn)
219 219 fctxs[fn] = p2[fn]
220 220
221 221 def fctxfn(repo, cx, path):
222 222 return fctxs.get(path)
223 223
224 224 if len(ps) == 0 or ps[0] < 0:
225 225 pars = [None, None]
226 226 elif len(ps) == 1:
227 227 pars = [nodeids[ps[0]], None]
228 228 else:
229 229 pars = [nodeids[p] for p in ps]
230 230 cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
231 231 date=(id, 0),
232 232 user="debugbuilddag",
233 233 extra={'branch': atbranch})
234 234 nodeid = repo.commitctx(cx)
235 235 nodeids.append(nodeid)
236 236 at = id
237 237 elif type == 'l':
238 238 id, name = data
239 239 ui.note(('tag %s\n' % name))
240 240 tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
241 241 elif type == 'a':
242 242 ui.note(('branch %s\n' % data))
243 243 atbranch = data
244 244 ui.progress(_('building'), id, unit=_('revisions'), total=total)
245 245 tr.close()
246 246
247 247 if tags:
248 248 repo.vfs.write("localtags", "".join(tags))
249 249 finally:
250 250 ui.progress(_('building'), None)
251 251 release(tr, lock, wlock)
252 252
253 253 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
254 254 indent_string = ' ' * indent
255 255 if all:
256 256 ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
257 257 % indent_string)
258 258
259 259 def showchunks(named):
260 260 ui.write("\n%s%s\n" % (indent_string, named))
261 261 chain = None
262 262 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
263 263 node = chunkdata['node']
264 264 p1 = chunkdata['p1']
265 265 p2 = chunkdata['p2']
266 266 cs = chunkdata['cs']
267 267 deltabase = chunkdata['deltabase']
268 268 delta = chunkdata['delta']
269 269 ui.write("%s%s %s %s %s %s %s\n" %
270 270 (indent_string, hex(node), hex(p1), hex(p2),
271 271 hex(cs), hex(deltabase), len(delta)))
272 272 chain = node
273 273
274 274 chunkdata = gen.changelogheader()
275 275 showchunks("changelog")
276 276 chunkdata = gen.manifestheader()
277 277 showchunks("manifest")
278 278 for chunkdata in iter(gen.filelogheader, {}):
279 279 fname = chunkdata['filename']
280 280 showchunks(fname)
281 281 else:
282 282 if isinstance(gen, bundle2.unbundle20):
283 283 raise error.Abort(_('use debugbundle2 for this file'))
284 284 chunkdata = gen.changelogheader()
285 285 chain = None
286 286 for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
287 287 node = chunkdata['node']
288 288 ui.write("%s%s\n" % (indent_string, hex(node)))
289 289 chain = node
290 290
291 291 def _debugbundle2(ui, gen, all=None, **opts):
292 292 """lists the contents of a bundle2"""
293 293 if not isinstance(gen, bundle2.unbundle20):
294 294 raise error.Abort(_('not a bundle2 file'))
295 295 ui.write(('Stream params: %s\n' % repr(gen.params)))
296 296 for part in gen.iterparts():
297 297 ui.write('%s -- %r\n' % (part.type, repr(part.params)))
298 298 if part.type == 'changegroup':
299 299 version = part.params.get('version', '01')
300 300 cg = changegroup.getunbundler(version, part, 'UN')
301 301 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
302 302
303 303 @command('debugbundle',
304 304 [('a', 'all', None, _('show all details')),
305 305 ('', 'spec', None, _('print the bundlespec of the bundle'))],
306 306 _('FILE'),
307 307 norepo=True)
308 308 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
309 309 """lists the contents of a bundle"""
310 310 with hg.openpath(ui, bundlepath) as f:
311 311 if spec:
312 312 spec = exchange.getbundlespec(ui, f)
313 313 ui.write('%s\n' % spec)
314 314 return
315 315
316 316 gen = exchange.readbundle(ui, f, bundlepath)
317 317 if isinstance(gen, bundle2.unbundle20):
318 318 return _debugbundle2(ui, gen, all=all, **opts)
319 319 _debugchangegroup(ui, gen, all=all, **opts)
320 320
321 321 @command('debugcheckstate', [], '')
322 322 def debugcheckstate(ui, repo):
323 323 """validate the correctness of the current dirstate"""
324 324 parent1, parent2 = repo.dirstate.parents()
325 325 m1 = repo[parent1].manifest()
326 326 m2 = repo[parent2].manifest()
327 327 errors = 0
328 328 for f in repo.dirstate:
329 329 state = repo.dirstate[f]
330 330 if state in "nr" and f not in m1:
331 331 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
332 332 errors += 1
333 333 if state in "a" and f in m1:
334 334 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
335 335 errors += 1
336 336 if state in "m" and f not in m1 and f not in m2:
337 337 ui.warn(_("%s in state %s, but not in either manifest\n") %
338 338 (f, state))
339 339 errors += 1
340 340 for f in m1:
341 341 state = repo.dirstate[f]
342 342 if state not in "nrm":
343 343 ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
344 344 errors += 1
345 345 if errors:
346 346 errstr = _(".hg/dirstate inconsistent with current parent's manifest")
347 347 raise error.Abort(errstr)
348 348
349 349 @command('debugcolor',
350 350 [('', 'style', None, _('show all configured styles'))],
351 351 'hg debugcolor')
352 352 def debugcolor(ui, repo, **opts):
353 353 """show available colors, effects or styles"""
354 354 ui.write(('color mode: %s\n') % ui._colormode)
355 355 if opts.get('style'):
356 356 return _debugdisplaystyle(ui)
357 357 else:
358 358 return _debugdisplaycolor(ui)
359 359
360 360 def _debugdisplaycolor(ui):
361 361 ui = ui.copy()
362 362 ui._styles.clear()
363 363 for effect in color._activeeffects(ui).keys():
364 364 ui._styles[effect] = effect
365 365 if ui._terminfoparams:
366 366 for k, v in ui.configitems('color'):
367 367 if k.startswith('color.'):
368 368 ui._styles[k] = k[6:]
369 369 elif k.startswith('terminfo.'):
370 370 ui._styles[k] = k[9:]
371 371 ui.write(_('available colors:\n'))
372 372 # sort labels containing '_' after the others to group '_background' entries.
373 373 items = sorted(ui._styles.items(),
374 374 key=lambda i: ('_' in i[0], i[0], i[1]))
375 375 for colorname, label in items:
376 376 ui.write(('%s\n') % colorname, label=label)
377 377
378 378 def _debugdisplaystyle(ui):
379 379 ui.write(_('available style:\n'))
380 380 width = max(len(s) for s in ui._styles)
381 381 for label, effects in sorted(ui._styles.items()):
382 382 ui.write('%s' % label, label=label)
383 383 if effects:
385 385 ui.write(': ')
386 386 ui.write(' ' * (max(0, width - len(label))))
387 387 ui.write(', '.join(ui.label(e, e) for e in effects.split()))
388 388 ui.write('\n')
389 389
390 390 @command('debugcreatestreamclonebundle', [], 'FILE')
391 391 def debugcreatestreamclonebundle(ui, repo, fname):
392 392 """create a stream clone bundle file
393 393
394 394 Stream bundles are special bundles that are essentially archives of
395 395 revlog files. They are commonly used for cloning very quickly.
396 396 """
397 397 requirements, gen = streamclone.generatebundlev1(repo)
398 398 changegroup.writechunks(ui, gen, fname)
399 399
400 400 ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
401 401
402 402 @command('debugdag',
403 403 [('t', 'tags', None, _('use tags as labels')),
404 404 ('b', 'branches', None, _('annotate with branch names')),
405 405 ('', 'dots', None, _('use dots for runs')),
406 406 ('s', 'spaces', None, _('separate elements by spaces'))],
407 407 _('[OPTION]... [FILE [REV]...]'),
408 408 optionalrepo=True)
409 409 def debugdag(ui, repo, file_=None, *revs, **opts):
410 410 """format the changelog or an index DAG as a concise textual description
411 411
412 412 If you pass a revlog index, the revlog's DAG is emitted. If you list
413 413 revision numbers, they get labeled in the output as rN.
414 414
415 415 Otherwise, the changelog DAG of the current repo is emitted.
416 416 """
417 417 spaces = opts.get('spaces')
418 418 dots = opts.get('dots')
419 419 if file_:
420 420 rlog = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
421 421 file_)
422 422 revs = set((int(r) for r in revs))
423 423 def events():
424 424 for r in rlog:
425 425 yield 'n', (r, list(p for p in rlog.parentrevs(r)
426 426 if p != -1))
427 427 if r in revs:
428 428 yield 'l', (r, "r%i" % r)
429 429 elif repo:
430 430 cl = repo.changelog
431 431 tags = opts.get('tags')
432 432 branches = opts.get('branches')
433 433 if tags:
434 434 labels = {}
435 435 for l, n in repo.tags().items():
436 436 labels.setdefault(cl.rev(n), []).append(l)
437 437 def events():
438 438 b = "default"
439 439 for r in cl:
440 440 if branches:
441 441 newb = cl.read(cl.node(r))[5]['branch']
442 442 if newb != b:
443 443 yield 'a', newb
444 444 b = newb
445 445 yield 'n', (r, list(p for p in cl.parentrevs(r)
446 446 if p != -1))
447 447 if tags:
448 448 ls = labels.get(r)
449 449 if ls:
450 450 for l in ls:
451 451 yield 'l', (r, l)
452 452 else:
453 453 raise error.Abort(_('need repo for changelog dag'))
454 454
455 455 for line in dagparser.dagtextlines(events(),
456 456 addspaces=spaces,
457 457 wraplabels=True,
458 458 wrapannotations=True,
459 459 wrapnonlinear=dots,
460 460 usedots=dots,
461 461 maxlinewidth=70):
462 462 ui.write(line)
463 463 ui.write("\n")
464 464
465 465 @command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
466 466 def debugdata(ui, repo, file_, rev=None, **opts):
467 467 """dump the contents of a data file revision"""
468 468 if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
469 469 if rev is not None:
470 470 raise error.CommandError('debugdata', _('invalid arguments'))
471 471 file_, rev = None, file_
472 472 elif rev is None:
473 473 raise error.CommandError('debugdata', _('invalid arguments'))
474 474 r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
475 475 try:
476 476 ui.write(r.revision(r.lookup(rev), raw=True))
477 477 except KeyError:
478 478 raise error.Abort(_('invalid revision identifier %s') % rev)
479 479
480 480 @command('debugdate',
481 481 [('e', 'extended', None, _('try extended date formats'))],
482 482 _('[-e] DATE [RANGE]'),
483 483 norepo=True, optionalrepo=True)
484 484 def debugdate(ui, date, range=None, **opts):
485 485 """parse and display a date"""
486 486 if opts["extended"]:
487 487 d = util.parsedate(date, util.extendeddateformats)
488 488 else:
489 489 d = util.parsedate(date)
490 490 ui.write(("internal: %s %s\n") % d)
491 491 ui.write(("standard: %s\n") % util.datestr(d))
492 492 if range:
493 493 m = util.matchdate(range)
494 494 ui.write(("match: %s\n") % m(d[0]))
495 495
496 496 @command('debugdeltachain',
497 497 cmdutil.debugrevlogopts + cmdutil.formatteropts,
498 498 _('-c|-m|FILE'),
499 499 optionalrepo=True)
500 500 def debugdeltachain(ui, repo, file_=None, **opts):
501 501 """dump information about delta chains in a revlog
502 502
503 503 Output can be templatized. Available template keywords are:
504 504
505 505 :``rev``: revision number
506 506 :``chainid``: delta chain identifier (numbered by unique base)
507 507 :``chainlen``: delta chain length to this revision
508 508 :``prevrev``: previous revision in delta chain
509 509 :``deltatype``: role of delta / how it was computed
510 510 :``compsize``: compressed size of revision
511 511 :``uncompsize``: uncompressed size of revision
512 512 :``chainsize``: total size of compressed revisions in chain
513 513 :``chainratio``: total chain size divided by uncompressed revision size
514 514 (new delta chains typically start at ratio 2.00)
515 515 :``lindist``: linear distance from base revision in delta chain to end
516 516 of this revision
517 517 :``extradist``: total size of revisions not part of this delta chain from
518 518 base of delta chain to end of this revision; a measurement
519 519 of how much extra data we need to read/seek across to read
520 520 the delta chain for this revision
521 521 :``extraratio``: extradist divided by chainsize; another representation of
522 522 how much unrelated data is needed to load this delta chain
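For example, a subset of the keywords can be selected with a template
(illustrative invocation)::

    hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {deltatype}\n'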
523 523 """
524 524 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
525 525 index = r.index
526 526 generaldelta = r.version & revlog.FLAG_GENERALDELTA
527 527
528 528 def revinfo(rev):
529 529 e = index[rev]
530 530 compsize = e[1]
531 531 uncompsize = e[2]
532 532 chainsize = 0
533 533
534 534 if generaldelta:
535 535 if e[3] == e[5]:
536 536 deltatype = 'p1'
537 537 elif e[3] == e[6]:
538 538 deltatype = 'p2'
539 539 elif e[3] == rev - 1:
540 540 deltatype = 'prev'
541 541 elif e[3] == rev:
542 542 deltatype = 'base'
543 543 else:
544 544 deltatype = 'other'
545 545 else:
546 546 if e[3] == rev:
547 547 deltatype = 'base'
548 548 else:
549 549 deltatype = 'prev'
550 550
551 551 chain = r._deltachain(rev)[0]
552 552 for iterrev in chain:
553 553 e = index[iterrev]
554 554 chainsize += e[1]
555 555
556 556 return compsize, uncompsize, deltatype, chain, chainsize
557 557
558 558 fm = ui.formatter('debugdeltachain', opts)
559 559
560 560 fm.plain(' rev chain# chainlen prev delta '
561 561 'size rawsize chainsize ratio lindist extradist '
562 562 'extraratio\n')
563 563
564 564 chainbases = {}
565 565 for rev in r:
566 566 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
567 567 chainbase = chain[0]
568 568 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
569 569 basestart = r.start(chainbase)
570 570 revstart = r.start(rev)
571 571 lineardist = revstart + comp - basestart
572 572 extradist = lineardist - chainsize
573 573 try:
574 574 prevrev = chain[-2]
575 575 except IndexError:
576 576 prevrev = -1
577 577
578 578 chainratio = float(chainsize) / float(uncomp)
579 579 extraratio = float(extradist) / float(chainsize)
580 580
581 581 fm.startitem()
582 582 fm.write('rev chainid chainlen prevrev deltatype compsize '
583 583 'uncompsize chainsize chainratio lindist extradist '
584 584 'extraratio',
585 585 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
586 586 rev, chainid, len(chain), prevrev, deltatype, comp,
587 587 uncomp, chainsize, chainratio, lineardist, extradist,
588 588 extraratio,
589 589 rev=rev, chainid=chainid, chainlen=len(chain),
590 590 prevrev=prevrev, deltatype=deltatype, compsize=comp,
591 591 uncompsize=uncomp, chainsize=chainsize,
592 592 chainratio=chainratio, lindist=lineardist,
593 593 extradist=extradist, extraratio=extraratio)
594 594
595 595 fm.end()
596 596
597 597 @command('debugdirstate|debugstate',
598 598 [('', 'nodates', None, _('do not display the saved mtime')),
599 599 ('', 'datesort', None, _('sort by saved mtime'))],
600 600 _('[OPTION]...'))
601 601 def debugstate(ui, repo, **opts):
602 602 """show the contents of the current dirstate"""
603 603
604 604 nodates = opts.get('nodates')
605 605 datesort = opts.get('datesort')
606 606
607 607 timestr = ""
608 608 if datesort:
609 609 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
610 610 else:
611 611 keyfunc = None # sort by filename
612 612 for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
613 613 if ent[3] == -1:
614 614 timestr = 'unset '
615 615 elif nodates:
616 616 timestr = 'set '
617 617 else:
618 618 timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
619 619 time.localtime(ent[3]))
620 620 if ent[1] & 0o20000:
621 621 mode = 'lnk'
622 622 else:
623 623 mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
624 624 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
625 625 for f in repo.dirstate.copies():
626 626 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
627 627
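A sample of the output produced by the format strings above (state, mode, size, mtime, filename; values hypothetical)::

    n 644         12 2017-05-20 12:34:56 foo/bar.py
    copy: foo/baz.py -> foo/bar.py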
628 628 @command('debugdiscovery',
629 629 [('', 'old', None, _('use old-style discovery')),
630 630 ('', 'nonheads', None,
631 631 _('use old-style discovery with non-heads included')),
632 632 ] + cmdutil.remoteopts,
633 633 _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
634 634 def debugdiscovery(ui, repo, remoteurl="default", **opts):
635 635 """runs the changeset discovery protocol in isolation"""
636 636 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
637 637 opts.get('branch'))
638 638 remote = hg.peer(repo, opts, remoteurl)
639 639 ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
640 640
641 641 # make sure tests are repeatable
642 642 random.seed(12323)
643 643
644 644 def doit(localheads, remoteheads, remote=remote):
645 645 if opts.get('old'):
646 646 if localheads:
647 647 raise error.Abort('cannot use localheads with old style '
648 648 'discovery')
649 649 if not util.safehasattr(remote, 'branches'):
650 650 # enable in-client legacy support
651 651 remote = localrepo.locallegacypeer(remote.local())
652 652 common, _in, hds = treediscovery.findcommonincoming(repo, remote,
653 653 force=True)
654 654 common = set(common)
655 655 if not opts.get('nonheads'):
656 656 ui.write(("unpruned common: %s\n") %
657 657 " ".join(sorted(short(n) for n in common)))
658 658 dag = dagutil.revlogdag(repo.changelog)
659 659 all = dag.ancestorset(dag.internalizeall(common))
660 660 common = dag.externalizeall(dag.headsetofconnecteds(all))
661 661 else:
662 662 common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
663 663 common = set(common)
664 664 rheads = set(hds)
665 665 lheads = set(repo.heads())
666 666 ui.write(("common heads: %s\n") %
667 667 " ".join(sorted(short(n) for n in common)))
668 668 if lheads <= common:
669 669 ui.write(("local is subset\n"))
670 670 elif rheads <= common:
671 671 ui.write(("remote is subset\n"))
672 672
673 673 serverlogs = opts.get('serverlog')
674 674 if serverlogs:
675 675 for filename in serverlogs:
676 676 with open(filename, 'r') as logfile:
677 677 line = logfile.readline()
678 678 while line:
679 679 parts = line.strip().split(';')
680 680 op = parts[1]
681 681 if op == 'cg':
682 682 pass
683 683 elif op == 'cgss':
684 684 doit(parts[2].split(' '), parts[3].split(' '))
685 685 elif op == 'unb':
686 686 doit(parts[3].split(' '), parts[2].split(' '))
687 687 line = logfile.readline()
688 688 else:
689 689 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
690 690 opts.get('remote_head'))
691 691 localrevs = opts.get('local_head')
692 692 doit(localrevs, remoterevs)
693 693
694 694 @command('debugextensions', cmdutil.formatteropts, [], norepo=True)
695 695 def debugextensions(ui, **opts):
696 696 '''show information about active extensions'''
697 697 exts = extensions.extensions(ui)
698 698 hgver = util.version()
699 699 fm = ui.formatter('debugextensions', opts)
700 700 for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
701 701 isinternal = extensions.ismoduleinternal(extmod)
702 702 extsource = pycompat.fsencode(extmod.__file__)
703 703 if isinternal:
704 704 exttestedwith = [] # never expose magic string to users
705 705 else:
706 706 exttestedwith = getattr(extmod, 'testedwith', '').split()
707 707 extbuglink = getattr(extmod, 'buglink', None)
708 708
709 709 fm.startitem()
710 710
711 711 if ui.quiet or ui.verbose:
712 712 fm.write('name', '%s\n', extname)
713 713 else:
714 714 fm.write('name', '%s', extname)
715 715 if isinternal or hgver in exttestedwith:
716 716 fm.plain('\n')
717 717 elif not exttestedwith:
718 718 fm.plain(_(' (untested!)\n'))
719 719 else:
720 720 lasttestedversion = exttestedwith[-1]
721 721 fm.plain(' (%s!)\n' % lasttestedversion)
722 722
723 723 fm.condwrite(ui.verbose and extsource, 'source',
724 724 _(' location: %s\n'), extsource or "")
725 725
726 726 if ui.verbose:
727 727 fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
728 728 fm.data(bundled=isinternal)
729 729
730 730 fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
731 731 _(' tested with: %s\n'),
732 732 fm.formatlist(exttestedwith, name='ver'))
733 733
734 734 fm.condwrite(ui.verbose and extbuglink, 'buglink',
735 735 _(' bug reporting: %s\n'), extbuglink or "")
736 736
737 737 fm.end()
738 738
739 739 @command('debugfileset',
740 740 [('r', 'rev', '', _('apply the filespec on this revision'), _('REV'))],
741 741 _('[-r REV] FILESPEC'))
742 742 def debugfileset(ui, repo, expr, **opts):
743 743 '''parse and apply a fileset specification'''
744 744 ctx = scmutil.revsingle(repo, opts.get('rev'), None)
745 745 if ui.verbose:
746 746 tree = fileset.parse(expr)
747 747 ui.note(fileset.prettyformat(tree), "\n")
748 748
749 749 for f in ctx.getfileset(expr):
750 750 ui.write("%s\n" % f)
751 751
752 752 @command('debugfsinfo', [], _('[PATH]'), norepo=True)
753 753 def debugfsinfo(ui, path="."):
754 754 """show information detected about current filesystem"""
755 755 ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
756 756 ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
757 757 ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
758 758 ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
759 759 casesensitive = '(unknown)'
760 760 try:
761 761 with tempfile.NamedTemporaryFile(prefix='.debugfsinfo', dir=path) as f:
762 762 casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
763 763 except OSError:
764 764 pass
765 765 ui.write(('case-sensitive: %s\n') % casesensitive)
766 766
767 767 @command('debuggetbundle',
768 768 [('H', 'head', [], _('id of head node'), _('ID')),
769 769 ('C', 'common', [], _('id of common node'), _('ID')),
770 770 ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
771 771 _('REPO FILE [-H|-C ID]...'),
772 772 norepo=True)
773 773 def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
774 774 """retrieves a bundle from a repo
775 775
776 776 Every ID must be a full-length hex node id string. Saves the bundle to the
777 777 given file.
778 778 """
779 779 repo = hg.peer(ui, opts, repopath)
780 780 if not repo.capable('getbundle'):
781 781 raise error.Abort("getbundle() not supported by target repository")
782 782 args = {}
783 783 if common:
784 784 args['common'] = [bin(s) for s in common]
785 785 if head:
786 786 args['heads'] = [bin(s) for s in head]
787 787 # TODO: get desired bundlecaps from command line.
788 788 args['bundlecaps'] = None
789 789 bundle = repo.getbundle('debug', **args)
790 790
791 791 bundletype = opts.get('type', 'bzip2').lower()
792 792 btypes = {'none': 'HG10UN',
793 793 'bzip2': 'HG10BZ',
794 794 'gzip': 'HG10GZ',
795 795 'bundle2': 'HG20'}
796 796 bundletype = btypes.get(bundletype)
797 797 if bundletype not in bundle2.bundletypes:
798 798 raise error.Abort(_('unknown bundle type specified with --type'))
799 799 bundle2.writebundle(ui, bundle, bundlepath, bundletype)
800 800
801 801 @command('debugignore', [], '[FILE]')
802 802 def debugignore(ui, repo, *files, **opts):
803 803 """display the combined ignore pattern and information about ignored files
804 804
805 805 With no argument, displays the combined ignore pattern.
806 806
807 807 Given space-separated file names, shows whether each given file is ignored
808 808 and, if so, shows the ignore rule (file and line number) that matched it.
809 809 """
810 810 ignore = repo.dirstate._ignore
811 811 if not files:
812 812 # Show all the patterns
813 includepat = getattr(ignore, 'includepat', None)
814 if includepat is not None:
815 ui.write("%s\n" % includepat)
816 else:
817 raise error.Abort(_("no ignore patterns found"))
813 ui.write("%s\n" % repr(ignore))
818 814 else:
819 815 for f in files:
820 816 nf = util.normpath(f)
821 817 ignored = None
822 818 ignoredata = None
823 819 if nf != '.':
824 820 if ignore(nf):
825 821 ignored = nf
826 822 ignoredata = repo.dirstate._ignorefileandline(nf)
827 823 else:
828 824 for p in util.finddirs(nf):
829 825 if ignore(p):
830 826 ignored = p
831 827 ignoredata = repo.dirstate._ignorefileandline(p)
832 828 break
833 829 if ignored:
834 830 if ignored == nf:
835 831 ui.write(_("%s is ignored\n") % f)
836 832 else:
837 833 ui.write(_("%s is ignored because of "
838 834 "containing folder %s\n")
839 835 % (f, ignored))
840 836 ignorefile, lineno, line = ignoredata
841 837 ui.write(_("(ignore rule in %s, line %d: '%s')\n")
842 838 % (ignorefile, lineno, line))
843 839 else:
844 840 ui.write(_("%s is not ignored\n") % f)
845 841
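# The repr()-based line above replaces the old includepat-only output:
# with no arguments, whatever the ignore matcher's __repr__() yields is
# printed. A hedged sketch; the exact repr text is illustrative:
#
#   $ hg debugignore
#   <matcher files=[], patterns=None, includes='(?:foo(?:/|$))', excludes=None>
#   $ hg debugignore foo/bar
#   foo/bar is ignored
#   (ignore rule in .hgignore, line 1: 'foo')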
846 842 @command('debugindex', cmdutil.debugrevlogopts +
847 843 [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
848 844 _('[-f FORMAT] -c|-m|FILE'),
849 845 optionalrepo=True)
850 846 def debugindex(ui, repo, file_=None, **opts):
851 847 """dump the contents of an index file"""
852 848 r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
853 849 format = opts.get('format', 0)
854 850 if format not in (0, 1):
855 851 raise error.Abort(_("unknown format %d") % format)
856 852
857 853 generaldelta = r.version & revlog.FLAG_GENERALDELTA
858 854 if generaldelta:
859 855 basehdr = ' delta'
860 856 else:
861 857 basehdr = ' base'
862 858
863 859 if ui.debugflag:
864 860 shortfn = hex
865 861 else:
866 862 shortfn = short
867 863
868 864 # There might not be anything in r, so have a sane default
869 865 idlen = 12
870 866 for i in r:
871 867 idlen = len(shortfn(r.node(i)))
872 868 break
873 869
874 870 if format == 0:
875 871 ui.write((" rev offset length " + basehdr + " linkrev"
876 872 " %s %s p2\n") % ("nodeid".ljust(idlen), "p1".ljust(idlen)))
877 873 elif format == 1:
878 874 ui.write((" rev flag offset length"
879 875 " size " + basehdr + " link p1 p2"
880 876 " %s\n") % "nodeid".rjust(idlen))
881 877
882 878 for i in r:
883 879 node = r.node(i)
884 880 if generaldelta:
885 881 base = r.deltaparent(i)
886 882 else:
887 883 base = r.chainbase(i)
888 884 if format == 0:
889 885 try:
890 886 pp = r.parents(node)
891 887 except Exception:
892 888 pp = [nullid, nullid]
893 889 ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
894 890 i, r.start(i), r.length(i), base, r.linkrev(i),
895 891 shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
896 892 elif format == 1:
897 893 pr = r.parentrevs(i)
898 894 ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
899 895 i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
900 896 base, r.linkrev(i), pr[0], pr[1], shortfn(node)))
901 897
902 898 @command('debugindexdot', cmdutil.debugrevlogopts,
903 899 _('-c|-m|FILE'), optionalrepo=True)
904 900 def debugindexdot(ui, repo, file_=None, **opts):
905 901 """dump an index DAG as a graphviz dot file"""
906 902 r = cmdutil.openrevlog(repo, 'debugindexdot', file_, opts)
907 903 ui.write(("digraph G {\n"))
908 904 for i in r:
909 905 node = r.node(i)
910 906 pp = r.parents(node)
911 907 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
912 908 if pp[1] != nullid:
913 909 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
914 910 ui.write("}\n")
915 911
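# Example of the graphviz source emitted above for a tiny linear history
# (the null parent renders as rev -1):
#
#   $ hg debugindexdot -c
#   digraph G {
#           -1 -> 0
#           0 -> 1
#   }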
916 912 @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
917 913 def debuginstall(ui, **opts):
918 914 '''test Mercurial installation
919 915
920 916 Returns 0 on success.
921 917 '''
922 918
923 919 def writetemp(contents):
924 920 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
925 921 f = os.fdopen(fd, pycompat.sysstr("wb"))
926 922 f.write(contents)
927 923 f.close()
928 924 return name
929 925
930 926 problems = 0
931 927
932 928 fm = ui.formatter('debuginstall', opts)
933 929 fm.startitem()
934 930
935 931 # encoding
936 932 fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
937 933 err = None
938 934 try:
939 935 encoding.fromlocal("test")
940 936 except error.Abort as inst:
941 937 err = inst
942 938 problems += 1
943 939 fm.condwrite(err, 'encodingerror', _(" %s\n"
944 940 " (check that your locale is properly set)\n"), err)
945 941
946 942 # Python
947 943 fm.write('pythonexe', _("checking Python executable (%s)\n"),
948 944 pycompat.sysexecutable)
949 945 fm.write('pythonver', _("checking Python version (%s)\n"),
950 946 ("%d.%d.%d" % sys.version_info[:3]))
951 947 fm.write('pythonlib', _("checking Python lib (%s)...\n"),
952 948 os.path.dirname(pycompat.fsencode(os.__file__)))
953 949
954 950 security = set(sslutil.supportedprotocols)
955 951 if sslutil.hassni:
956 952 security.add('sni')
957 953
958 954 fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
959 955 fm.formatlist(sorted(security), name='protocol',
960 956 fmt='%s', sep=','))
961 957
962 958 # These are warnings, not errors. So don't increment problem count. This
963 959 # may change in the future.
964 960 if 'tls1.2' not in security:
965 961 fm.plain(_(' TLS 1.2 not supported by Python install; '
966 962 'network connections lack modern security\n'))
967 963 if 'sni' not in security:
968 964 fm.plain(_(' SNI not supported by Python install; may have '
969 965 'connectivity issues with some servers\n'))
970 966
971 967 # TODO print CA cert info
972 968
973 969 # hg version
974 970 hgver = util.version()
975 971 fm.write('hgver', _("checking Mercurial version (%s)\n"),
976 972 hgver.split('+')[0])
977 973 fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
978 974 '+'.join(hgver.split('+')[1:]))
979 975
980 976 # compiled modules
981 977 fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
982 978 policy.policy)
983 979 fm.write('hgmodules', _("checking installed modules (%s)...\n"),
984 980 os.path.dirname(pycompat.fsencode(__file__)))
985 981
986 982 if policy.policy in ('c', 'allow'):
987 983 err = None
988 984 try:
989 985 from .cext import (
990 986 base85,
991 987 bdiff,
992 988 mpatch,
993 989 osutil,
994 990 )
995 991 dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
996 992 except Exception as inst:
997 993 err = inst
998 994 problems += 1
999 995 fm.condwrite(err, 'extensionserror', " %s\n", err)
1000 996
1001 997 compengines = util.compengines._engines.values()
1002 998 fm.write('compengines', _('checking registered compression engines (%s)\n'),
1003 999 fm.formatlist(sorted(e.name() for e in compengines),
1004 1000 name='compengine', fmt='%s', sep=', '))
1005 1001 fm.write('compenginesavail', _('checking available compression engines '
1006 1002 '(%s)\n'),
1007 1003 fm.formatlist(sorted(e.name() for e in compengines
1008 1004 if e.available()),
1009 1005 name='compengine', fmt='%s', sep=', '))
1010 1006 wirecompengines = util.compengines.supportedwireengines(util.SERVERROLE)
1011 1007 fm.write('compenginesserver', _('checking available compression engines '
1012 1008 'for wire protocol (%s)\n'),
1013 1009 fm.formatlist([e.name() for e in wirecompengines
1014 1010 if e.wireprotosupport()],
1015 1011 name='compengine', fmt='%s', sep=', '))
1016 1012
1017 1013 # templates
1018 1014 p = templater.templatepaths()
1019 1015 fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
1020 1016 fm.condwrite(not p, '', _(" no template directories found\n"))
1021 1017 if p:
1022 1018 m = templater.templatepath("map-cmdline.default")
1023 1019 if m:
1024 1020 # template found, check if it is working
1025 1021 err = None
1026 1022 try:
1027 1023 templater.templater.frommapfile(m)
1028 1024 except Exception as inst:
1029 1025 err = inst
1030 1026 p = None
1031 1027 fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
1032 1028 else:
1033 1029 p = None
1034 1030 fm.condwrite(p, 'defaulttemplate',
1035 1031 _("checking default template (%s)\n"), m)
1036 1032 fm.condwrite(not m, 'defaulttemplatenotfound',
1037 1033 _(" template '%s' not found\n"), "default")
1038 1034 if not p:
1039 1035 problems += 1
1040 1036 fm.condwrite(not p, '',
1041 1037 _(" (templates seem to have been installed incorrectly)\n"))
1042 1038
1043 1039 # editor
1044 1040 editor = ui.geteditor()
1045 1041 editor = util.expandpath(editor)
1046 1042 fm.write('editor', _("checking commit editor... (%s)\n"), editor)
1047 1043 cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
1048 1044 fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
1049 1045 _(" No commit editor set and can't find %s in PATH\n"
1050 1046 " (specify a commit editor in your configuration"
1051 1047 " file)\n"), not cmdpath and editor == 'vi' and editor)
1052 1048 fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
1053 1049 _(" Can't find editor '%s' in PATH\n"
1054 1050 " (specify a commit editor in your configuration"
1055 1051 " file)\n"), not cmdpath and editor)
1056 1052 if not cmdpath and editor != 'vi':
1057 1053 problems += 1
1058 1054
1059 1055 # check username
1060 1056 username = None
1061 1057 err = None
1062 1058 try:
1063 1059 username = ui.username()
1064 1060 except error.Abort as e:
1065 1061 err = e
1066 1062 problems += 1
1067 1063
1068 1064 fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
1069 1065 fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
1070 1066 " (specify a username in your configuration file)\n"), err)
1071 1067
1072 1068 fm.condwrite(not problems, '',
1073 1069 _("no problems detected\n"))
1074 1070 if not problems:
1075 1071 fm.data(problems=problems)
1076 1072 fm.condwrite(problems, 'problems',
1077 1073 _("%d problems detected,"
1078 1074 " please check your install!\n"), problems)
1079 1075 fm.end()
1080 1076
1081 1077 return problems
1082 1078
1083 1079 @command('debugknown', [], _('REPO ID...'), norepo=True)
1084 1080 def debugknown(ui, repopath, *ids, **opts):
1085 1081 """test whether node ids are known to a repo
1086 1082
1087 1083 Every ID must be a full-length hex node id string. Returns a list of 0s
1088 1084 and 1s indicating unknown/known.
1089 1085 """
1090 1086 repo = hg.peer(ui, opts, repopath)
1091 1087 if not repo.capable('known'):
1092 1088 raise error.Abort("known() not supported by target repository")
1093 1089 flags = repo.known([bin(s) for s in ids])
1094 1090 ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
1095 1091
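# Usage sketch: query a peer for two full-length hex node ids (the ids
# below are placeholders); the output is one 0/1 flag per argument:
#
#   $ hg debugknown path/to/repo <nodeid1> <nodeid2>
#   10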
1096 1092 @command('debuglabelcomplete', [], _('LABEL...'))
1097 1093 def debuglabelcomplete(ui, repo, *args):
1098 1094 '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
1099 1095 debugnamecomplete(ui, repo, *args)
1100 1096
1101 1097 @command('debuglocks',
1102 1098 [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
1103 1099 ('W', 'force-wlock', None,
1104 1100 _('free the working state lock (DANGEROUS)'))],
1105 1101 _('[OPTION]...'))
1106 1102 def debuglocks(ui, repo, **opts):
1107 1103 """show or modify state of locks
1108 1104
1109 1105 By default, this command will show which locks are held. This
1110 1106 includes the user and process holding the lock, the amount of time
1111 1107 the lock has been held, and the machine name where the process is
1112 1108 running if it's not local.
1113 1109
1114 1110 Locks protect the integrity of Mercurial's data, so should be
1115 1111 treated with care. System crashes or other interruptions may cause
1116 1112 locks to not be properly released, though Mercurial will usually
1117 1113 detect and remove such stale locks automatically.
1118 1114
1119 1115 However, detecting stale locks may not always be possible (for
1120 1116 instance, on a shared filesystem). Removing locks may also be
1121 1117 blocked by filesystem permissions.
1122 1118
1123 1119 Returns 0 if no locks are held.
1124 1120
1125 1121 """
1126 1122
1127 1123 if opts.get('force_lock'):
1128 1124 repo.svfs.unlink('lock')
1129 1125 if opts.get('force_wlock'):
1130 1126 repo.vfs.unlink('wlock')
1131 1127 if opts.get('force_lock') or opts.get('force_wlock'):
1132 1128 return 0
1133 1129
1134 1130 now = time.time()
1135 1131 held = 0
1136 1132
1137 1133 def report(vfs, name, method):
1138 1134 # this causes stale locks to get reaped for more accurate reporting
1139 1135 try:
1140 1136 l = method(False)
1141 1137 except error.LockHeld:
1142 1138 l = None
1143 1139
1144 1140 if l:
1145 1141 l.release()
1146 1142 else:
1147 1143 try:
1148 1144 stat = vfs.lstat(name)
1149 1145 age = now - stat.st_mtime
1150 1146 user = util.username(stat.st_uid)
1151 1147 locker = vfs.readlock(name)
1152 1148 if ":" in locker:
1153 1149 host, pid = locker.split(':')
1154 1150 if host == socket.gethostname():
1155 1151 locker = 'user %s, process %s' % (user, pid)
1156 1152 else:
1157 1153 locker = 'user %s, process %s, host %s' \
1158 1154 % (user, pid, host)
1159 1155 ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
1160 1156 return 1
1161 1157 except OSError as e:
1162 1158 if e.errno != errno.ENOENT:
1163 1159 raise
1164 1160
1165 1161 ui.write(("%-6s free\n") % (name + ":"))
1166 1162 return 0
1167 1163
1168 1164 held += report(repo.svfs, "lock", repo.lock)
1169 1165 held += report(repo.vfs, "wlock", repo.wlock)
1170 1166
1171 1167 return held
1172 1168
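# Illustrative output when the store lock is held by a local process and
# the working-state lock is free (format matches the report() writes
# above; user, pid and age are placeholders):
#
#   $ hg debuglocks
#   lock:  user alice, process 12345 (61s)
#   wlock: free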
1173 1169 @command('debugmergestate', [], '')
1174 1170 def debugmergestate(ui, repo, *args):
1175 1171 """print merge state
1176 1172
1177 1173 Use --verbose to print out information about whether v1 or v2 merge state
1178 1174 was chosen."""
1179 1175 def _hashornull(h):
1180 1176 if h == nullhex:
1181 1177 return 'null'
1182 1178 else:
1183 1179 return h
1184 1180
1185 1181 def printrecords(version):
1186 1182 ui.write(('* version %s records\n') % version)
1187 1183 if version == 1:
1188 1184 records = v1records
1189 1185 else:
1190 1186 records = v2records
1191 1187
1192 1188 for rtype, record in records:
1193 1189 # pretty print some record types
1194 1190 if rtype == 'L':
1195 1191 ui.write(('local: %s\n') % record)
1196 1192 elif rtype == 'O':
1197 1193 ui.write(('other: %s\n') % record)
1198 1194 elif rtype == 'm':
1199 1195 driver, mdstate = record.split('\0', 1)
1200 1196 ui.write(('merge driver: %s (state "%s")\n')
1201 1197 % (driver, mdstate))
1202 1198 elif rtype in 'FDC':
1203 1199 r = record.split('\0')
1204 1200 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1205 1201 if version == 1:
1206 1202 onode = 'not stored in v1 format'
1207 1203 flags = r[7]
1208 1204 else:
1209 1205 onode, flags = r[7:9]
1210 1206 ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
1211 1207 % (f, rtype, state, _hashornull(hash)))
1212 1208 ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
1213 1209 ui.write((' ancestor path: %s (node %s)\n')
1214 1210 % (afile, _hashornull(anode)))
1215 1211 ui.write((' other path: %s (node %s)\n')
1216 1212 % (ofile, _hashornull(onode)))
1217 1213 elif rtype == 'f':
1218 1214 filename, rawextras = record.split('\0', 1)
1219 1215 extras = rawextras.split('\0')
1220 1216 i = 0
1221 1217 extrastrings = []
1222 1218 while i < len(extras):
1223 1219 extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
1224 1220 i += 2
1225 1221
1226 1222 ui.write(('file extras: %s (%s)\n')
1227 1223 % (filename, ', '.join(extrastrings)))
1228 1224 elif rtype == 'l':
1229 1225 labels = record.split('\0', 2)
1230 1226 labels = [l for l in labels if len(l) > 0]
1231 1227 ui.write(('labels:\n'))
1232 1228 ui.write((' local: %s\n' % labels[0]))
1233 1229 ui.write((' other: %s\n' % labels[1]))
1234 1230 if len(labels) > 2:
1235 1231 ui.write((' base: %s\n' % labels[2]))
1236 1232 else:
1237 1233 ui.write(('unrecognized entry: %s\t%s\n')
1238 1234 % (rtype, record.replace('\0', '\t')))
1239 1235
1240 1236 # Avoid mergestate.read() since it may raise an exception for unsupported
1241 1237 # merge state records. We shouldn't be doing this, but this is OK since this
1242 1238 # command is pretty low-level.
1243 1239 ms = mergemod.mergestate(repo)
1244 1240
1245 1241 # sort so that reasonable information is on top
1246 1242 v1records = ms._readrecordsv1()
1247 1243 v2records = ms._readrecordsv2()
1248 1244 order = 'LOml'
1249 1245 def key(r):
1250 1246 idx = order.find(r[0])
1251 1247 if idx == -1:
1252 1248 return (1, r[1])
1253 1249 else:
1254 1250 return (0, idx)
1255 1251 v1records.sort(key=key)
1256 1252 v2records.sort(key=key)
1257 1253
1258 1254 if not v1records and not v2records:
1259 1255 ui.write(('no merge state found\n'))
1260 1256 elif not v2records:
1261 1257 ui.note(('no version 2 merge state\n'))
1262 1258 printrecords(1)
1263 1259 elif ms._v1v2match(v1records, v2records):
1264 1260 ui.note(('v1 and v2 states match: using v2\n'))
1265 1261 printrecords(2)
1266 1262 else:
1267 1263 ui.note(('v1 and v2 states mismatch: using v1\n'))
1268 1264 printrecords(1)
1269 1265 if ui.verbose:
1270 1266 printrecords(2)
1271 1267
1272 1268 @command('debugnamecomplete', [], _('NAME...'))
1273 1269 def debugnamecomplete(ui, repo, *args):
1274 1270 '''complete "names" - tags, open branch names, bookmark names'''
1275 1271
1276 1272 names = set()
1277 1273 # since we previously only listed open branches, we will handle that
1278 1274 # specially (after this for loop)
1279 1275 for name, ns in repo.names.iteritems():
1280 1276 if name != 'branches':
1281 1277 names.update(ns.listnames(repo))
1282 1278 names.update(tag for (tag, heads, tip, closed)
1283 1279 in repo.branchmap().iterbranches() if not closed)
1284 1280 completions = set()
1285 1281 if not args:
1286 1282 args = ['']
1287 1283 for a in args:
1288 1284 completions.update(n for n in names if n.startswith(a))
1289 1285 ui.write('\n'.join(sorted(completions)))
1290 1286 ui.write('\n')
1291 1287
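# Usage sketch (names are illustrative): completions for each prefix are
# printed sorted, one per line, drawn from tags, open branch names and
# bookmarks:
#
#   $ hg debugnamecomplete def
#   default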
1292 1288 @command('debugobsolete',
1293 1289 [('', 'flags', 0, _('markers flag')),
1294 1290 ('', 'record-parents', False,
1295 1291 _('record parent information for the precursor')),
1296 1292 ('r', 'rev', [], _('display markers relevant to REV')),
1297 1293 ('', 'index', False, _('display index of the marker')),
1298 1294 ('', 'delete', [], _('delete markers specified by indices')),
1299 1295 ] + cmdutil.commitopts2 + cmdutil.formatteropts,
1300 1296 _('[OBSOLETED [REPLACEMENT ...]]'))
1301 1297 def debugobsolete(ui, repo, precursor=None, *successors, **opts):
1302 1298 """create arbitrary obsolete marker
1303 1299
1304 1300 With no arguments, displays the list of obsolescence markers."""
1305 1301
1306 1302 def parsenodeid(s):
1307 1303 try:
1308 1304 # We do not use revsingle/revrange functions here to accept
1309 1305 # arbitrary node identifiers, possibly not present in the
1310 1306 # local repository.
1311 1307 n = bin(s)
1312 1308 if len(n) != len(nullid):
1313 1309 raise TypeError()
1314 1310 return n
1315 1311 except TypeError:
1316 1312 raise error.Abort('changeset references must be full hexadecimal '
1317 1313 'node identifiers')
1318 1314
1319 1315 if opts.get('delete'):
1320 1316 indices = []
1321 1317 for v in opts.get('delete'):
1322 1318 try:
1323 1319 indices.append(int(v))
1324 1320 except ValueError:
1325 1321 raise error.Abort(_('invalid index value: %r') % v,
1326 1322 hint=_('use integers for indices'))
1327 1323
1328 1324 if repo.currenttransaction():
1329 1325 raise error.Abort(_('cannot delete obsmarkers in the middle '
1330 1326 'of a transaction.'))
1331 1327
1332 1328 with repo.lock():
1333 1329 n = repair.deleteobsmarkers(repo.obsstore, indices)
1334 1330 ui.write(_('deleted %i obsolescence markers\n') % n)
1335 1331
1336 1332 return
1337 1333
1338 1334 if precursor is not None:
1339 1335 if opts['rev']:
1340 1336 raise error.Abort('cannot select revision when creating marker')
1341 1337 metadata = {}
1342 1338 metadata['user'] = opts['user'] or ui.username()
1343 1339 succs = tuple(parsenodeid(succ) for succ in successors)
1344 1340 l = repo.lock()
1345 1341 try:
1346 1342 tr = repo.transaction('debugobsolete')
1347 1343 try:
1348 1344 date = opts.get('date')
1349 1345 if date:
1350 1346 date = util.parsedate(date)
1351 1347 else:
1352 1348 date = None
1353 1349 prec = parsenodeid(precursor)
1354 1350 parents = None
1355 1351 if opts['record_parents']:
1356 1352 if prec not in repo.unfiltered():
1357 1353 raise error.Abort('cannot use --record-parents on '
1358 1354 'unknown changesets')
1359 1355 parents = repo.unfiltered()[prec].parents()
1360 1356 parents = tuple(p.node() for p in parents)
1361 1357 repo.obsstore.create(tr, prec, succs, opts['flags'],
1362 1358 parents=parents, date=date,
1363 1359 metadata=metadata)
1364 1360 tr.close()
1365 1361 except ValueError as exc:
1366 1362 raise error.Abort(_('bad obsmarker input: %s') % exc)
1367 1363 finally:
1368 1364 tr.release()
1369 1365 finally:
1370 1366 l.release()
1371 1367 else:
1372 1368 if opts['rev']:
1373 1369 revs = scmutil.revrange(repo, opts['rev'])
1374 1370 nodes = [repo[r].node() for r in revs]
1375 1371 markers = list(obsolete.getmarkers(repo, nodes=nodes))
1376 1372 markers.sort(key=lambda x: x._data)
1377 1373 else:
1378 1374 markers = obsolete.getmarkers(repo)
1379 1375
1380 1376 markerstoiter = markers
1381 1377 isrelevant = lambda m: True
1382 1378 if opts.get('rev') and opts.get('index'):
1383 1379 markerstoiter = obsolete.getmarkers(repo)
1384 1380 markerset = set(markers)
1385 1381 isrelevant = lambda m: m in markerset
1386 1382
1387 1383 fm = ui.formatter('debugobsolete', opts)
1388 1384 for i, m in enumerate(markerstoiter):
1389 1385 if not isrelevant(m):
1390 1386 # marker can be irrelevant when we're iterating over a set
1391 1387 # of markers (markerstoiter) which is bigger than the set
1392 1388 # of markers we want to display (markers).
1393 1389 # This can happen if both --index and --rev options are
1394 1390 # provided, and thus we need to iterate over all of the markers
1395 1391 # to get the correct indices, but only display the ones that
1396 1392 # are relevant to the --rev value.
1397 1393 continue
1398 1394 fm.startitem()
1399 1395 ind = i if opts.get('index') else None
1400 1396 cmdutil.showmarker(fm, m, index=ind)
1401 1397 fm.end()
1402 1398
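# Usage sketches (node ids are placeholders): with no arguments the
# existing markers are listed through the formatter; with node arguments
# a marker recording PRECURSOR -> SUCCESSOR is created:
#
#   $ hg debugobsolete
#   $ hg debugobsolete <precursor-node> <successor-node>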
1403 1399 @command('debugpathcomplete',
1404 1400 [('f', 'full', None, _('complete an entire path')),
1405 1401 ('n', 'normal', None, _('show only normal files')),
1406 1402 ('a', 'added', None, _('show only added files')),
1407 1403 ('r', 'removed', None, _('show only removed files'))],
1408 1404 _('FILESPEC...'))
1409 1405 def debugpathcomplete(ui, repo, *specs, **opts):
1410 1406 '''complete part or all of a tracked path
1411 1407
1412 1408 This command supports shells that offer path name completion. It
1413 1409 currently completes only files already known to the dirstate.
1414 1410
1415 1411 Completion extends only to the next path segment unless
1416 1412 --full is specified, in which case entire paths are used.'''
1417 1413
1418 1414 def complete(path, acceptable):
1419 1415 dirstate = repo.dirstate
1420 1416 spec = os.path.normpath(os.path.join(pycompat.getcwd(), path))
1421 1417 rootdir = repo.root + pycompat.ossep
1422 1418 if spec != repo.root and not spec.startswith(rootdir):
1423 1419 return [], []
1424 1420 if os.path.isdir(spec):
1425 1421 spec += '/'
1426 1422 spec = spec[len(rootdir):]
1427 1423 fixpaths = pycompat.ossep != '/'
1428 1424 if fixpaths:
1429 1425 spec = spec.replace(pycompat.ossep, '/')
1430 1426 speclen = len(spec)
1431 1427 fullpaths = opts['full']
1432 1428 files, dirs = set(), set()
1433 1429 adddir, addfile = dirs.add, files.add
1434 1430 for f, st in dirstate.iteritems():
1435 1431 if f.startswith(spec) and st[0] in acceptable:
1436 1432 if fixpaths:
1437 1433 f = f.replace('/', pycompat.ossep)
1438 1434 if fullpaths:
1439 1435 addfile(f)
1440 1436 continue
1441 1437 s = f.find(pycompat.ossep, speclen)
1442 1438 if s >= 0:
1443 1439 adddir(f[:s])
1444 1440 else:
1445 1441 addfile(f)
1446 1442 return files, dirs
1447 1443
1448 1444 acceptable = ''
1449 1445 if opts['normal']:
1450 1446 acceptable += 'nm'
1451 1447 if opts['added']:
1452 1448 acceptable += 'a'
1453 1449 if opts['removed']:
1454 1450 acceptable += 'r'
1455 1451 cwd = repo.getcwd()
1456 1452 if not specs:
1457 1453 specs = ['.']
1458 1454
1459 1455 files, dirs = set(), set()
1460 1456 for spec in specs:
1461 1457 f, d = complete(spec, acceptable or 'nmar')
1462 1458 files.update(f)
1463 1459 dirs.update(d)
1464 1460 files.update(dirs)
1465 1461 ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
1466 1462 ui.write('\n')
1467 1463
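# Usage sketch (paths are illustrative): completion stops at the next
# path segment unless --full is given:
#
#   $ hg debugpathcomplete me
#   mercurial
#   $ hg debugpathcomplete --full mercurial/ma
#   mercurial/manifest.py
#   mercurial/match.py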
1468 1464 @command('debugpickmergetool',
1469 1465 [('r', 'rev', '', _('check for files in this revision'), _('REV')),
1470 1466 ('', 'changedelete', None, _('emulate merging change and delete')),
1471 1467 ] + cmdutil.walkopts + cmdutil.mergetoolopts,
1472 1468 _('[PATTERN]...'),
1473 1469 inferrepo=True)
1474 1470 def debugpickmergetool(ui, repo, *pats, **opts):
1475 1471 """examine which merge tool is chosen for specified file
1476 1472
1477 1473 As described in :hg:`help merge-tools`, Mercurial examines the
1478 1474 configurations below, in this order, to decide which merge tool
1479 1475 is chosen for the specified file.
1480 1476
1481 1477 1. ``--tool`` option
1482 1478 2. ``HGMERGE`` environment variable
1483 1479 3. configurations in ``merge-patterns`` section
1484 1480 4. configuration of ``ui.merge``
1485 1481 5. configurations in ``merge-tools`` section
1486 1482 6. ``hgmerge`` tool (for historical reason only)
1487 1483 7. default tool for fallback (``:merge`` or ``:prompt``)
1488 1484
1489 1485 This command writes out the examination result in the style below::
1490 1486
1491 1487 FILE = MERGETOOL
1492 1488
1493 1489 By default, all files known in the first parent context of the
1494 1490 working directory are examined. Use file patterns and/or -I/-X
1495 1491 options to limit target files. -r/--rev is also useful to examine
1496 1492 files in another context without actually updating to it.
1497 1493
1498 1494 With --debug, this command also shows warning messages while
1499 1495 matching against ``merge-patterns`` and so on. It is recommended to
1500 1496 use this option with explicit file patterns and/or -I/-X options,
1501 1497 because this option increases the amount of output per file
1502 1498 according to configurations in hgrc.
1503 1499
1504 1500 With -v/--verbose, this command first shows the configurations
1505 1501 below (only those actually specified):
1506 1502
1507 1503 - ``--tool`` option
1508 1504 - ``HGMERGE`` environment variable
1509 1505 - configuration of ``ui.merge``
1510 1506
1511 1507 If a merge tool is chosen before matching against
1512 1508 ``merge-patterns``, this command can't show any helpful
1513 1509 information, even with --debug. In such a case, the information
1514 1510 above is useful for understanding why a merge tool was chosen.
1515 1511 """
1516 1512 overrides = {}
1517 1513 if opts['tool']:
1518 1514 overrides[('ui', 'forcemerge')] = opts['tool']
1519 1515 ui.note(('with --tool %r\n') % (opts['tool']))
1520 1516
1521 1517 with ui.configoverride(overrides, 'debugmergepatterns'):
1522 1518 hgmerge = encoding.environ.get("HGMERGE")
1523 1519 if hgmerge is not None:
1524 1520 ui.note(('with HGMERGE=%r\n') % (hgmerge))
1525 1521 uimerge = ui.config("ui", "merge")
1526 1522 if uimerge:
1527 1523 ui.note(('with ui.merge=%r\n') % (uimerge))
1528 1524
1529 1525 ctx = scmutil.revsingle(repo, opts.get('rev'))
1530 1526 m = scmutil.match(ctx, pats, opts)
1531 1527 changedelete = opts['changedelete']
1532 1528 for path in ctx.walk(m):
1533 1529 fctx = ctx[path]
1534 1530 try:
1535 1531 if not ui.debugflag:
1536 1532 ui.pushbuffer(error=True)
1537 1533 tool, toolpath = filemerge._picktool(repo, ui, path,
1538 1534 fctx.isbinary(),
1539 1535 'l' in fctx.flags(),
1540 1536 changedelete)
1541 1537 finally:
1542 1538 if not ui.debugflag:
1543 1539 ui.popbuffer()
1544 1540 ui.write(('%s = %s\n') % (path, tool))
1545 1541
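# Usage sketch (file name and tool are illustrative): each examined file
# is written as "FILE = MERGETOOL", as the docstring above describes:
#
#   $ hg debugpickmergetool
#   foo.c = :merge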
1546 1542 @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
1547 1543 def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
1548 1544 '''access the pushkey key/value protocol
1549 1545
1550 1546 With two args, list the keys in the given namespace.
1551 1547
1552 1548 With five args, set a key to new if it currently is set to old.
1553 1549 Reports success or failure.
1554 1550 '''
1555 1551
1556 1552 target = hg.peer(ui, {}, repopath)
1557 1553 if keyinfo:
1558 1554 key, old, new = keyinfo
1559 1555 r = target.pushkey(namespace, key, old, new)
1560 1556 ui.status(str(r) + '\n')
1561 1557 return not r
1562 1558 else:
1563 1559 for k, v in sorted(target.listkeys(namespace).iteritems()):
1564 1560 ui.write("%s\t%s\n" % (util.escapestr(k),
1565 1561 util.escapestr(v)))
1566 1562
1567 1563 @command('debugpvec', [], _('A B'))
1568 1564 def debugpvec(ui, repo, a, b=None):
1569 1565 ca = scmutil.revsingle(repo, a)
1570 1566 cb = scmutil.revsingle(repo, b)
1571 1567 pa = pvec.ctxpvec(ca)
1572 1568 pb = pvec.ctxpvec(cb)
1573 1569 if pa == pb:
1574 1570 rel = "="
1575 1571 elif pa > pb:
1576 1572 rel = ">"
1577 1573 elif pa < pb:
1578 1574 rel = "<"
1579 1575 elif pa | pb:
1580 1576 rel = "|"
1581 1577 ui.write(_("a: %s\n") % pa)
1582 1578 ui.write(_("b: %s\n") % pb)
1583 1579 ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
1584 1580 ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
1585 1581 (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
1586 1582 pa.distance(pb), rel))
1587 1583
1588 1584 @command('debugrebuilddirstate|debugrebuildstate',
1589 1585 [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
1590 1586 ('', 'minimal', None, _('only rebuild files that are inconsistent with '
1591 1587 'the working copy parent')),
1592 1588 ],
1593 1589 _('[-r REV]'))
1594 1590 def debugrebuilddirstate(ui, repo, rev, **opts):
1595 1591 """rebuild the dirstate as it would look like for the given revision
1596 1592
1597 1593 If no revision is specified the first current parent will be used.
1598 1594
1599 1595 The dirstate will be set to the files of the given revision.
1600 1596 The actual working directory content or existing dirstate
1601 1597 information such as adds or removes is not considered.
1602 1598
1603 1599 ``minimal`` will only rebuild the dirstate status for files that claim to be
1604 1600 tracked but are not in the parent manifest, or that exist in the parent
1605 1601 manifest but are not in the dirstate. It will not change adds, removes, or
1606 1602 modified files that are in the working copy parent.
1607 1603
1608 1604 One use of this command is to make the next :hg:`status` invocation
1609 1605 check the actual file content.
1610 1606 """
1611 1607 ctx = scmutil.revsingle(repo, rev)
1612 1608 with repo.wlock():
1613 1609 dirstate = repo.dirstate
1614 1610 changedfiles = None
1615 1611 # See command doc for what minimal does.
1616 1612 if opts.get('minimal'):
1617 1613 manifestfiles = set(ctx.manifest().keys())
1618 1614 dirstatefiles = set(dirstate)
1619 1615 manifestonly = manifestfiles - dirstatefiles
1620 1616 dsonly = dirstatefiles - manifestfiles
1621 1617 dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
1622 1618 changedfiles = manifestonly | dsnotadded
1623 1619
1624 1620 dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
1625 1621
1626 1622 @command('debugrebuildfncache', [], '')
1627 1623 def debugrebuildfncache(ui, repo):
1628 1624 """rebuild the fncache file"""
1629 1625 repair.rebuildfncache(ui, repo)
1630 1626
1631 1627 @command('debugrename',
1632 1628 [('r', 'rev', '', _('revision to debug'), _('REV'))],
1633 1629 _('[-r REV] FILE'))
1634 1630 def debugrename(ui, repo, file1, *pats, **opts):
1635 1631 """dump rename information"""
1636 1632
1637 1633 ctx = scmutil.revsingle(repo, opts.get('rev'))
1638 1634 m = scmutil.match(ctx, (file1,) + pats, opts)
1639 1635 for abs in ctx.walk(m):
1640 1636 fctx = ctx[abs]
1641 1637 o = fctx.filelog().renamed(fctx.filenode())
1642 1638 rel = m.rel(abs)
1643 1639 if o:
1644 1640 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
1645 1641 else:
1646 1642 ui.write(_("%s not renamed\n") % rel)
1647 1643
1648 1644 @command('debugrevlog', cmdutil.debugrevlogopts +
1649 1645 [('d', 'dump', False, _('dump index data'))],
1650 1646 _('-c|-m|FILE'),
1651 1647 optionalrepo=True)
1652 1648 def debugrevlog(ui, repo, file_=None, **opts):
1653 1649 """show data and statistics about a revlog"""
1654 1650 r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
1655 1651
1656 1652 if opts.get("dump"):
1657 1653 numrevs = len(r)
1658 1654 ui.write(("# rev p1rev p2rev start end deltastart base p1 p2"
1659 1655 " rawsize totalsize compression heads chainlen\n"))
1660 1656 ts = 0
1661 1657 heads = set()
1662 1658
1663 1659 for rev in xrange(numrevs):
1664 1660 dbase = r.deltaparent(rev)
1665 1661 if dbase == -1:
1666 1662 dbase = rev
1667 1663 cbase = r.chainbase(rev)
1668 1664 clen = r.chainlen(rev)
1669 1665 p1, p2 = r.parentrevs(rev)
1670 1666 rs = r.rawsize(rev)
1671 1667 ts = ts + rs
1672 1668 heads -= set(r.parentrevs(rev))
1673 1669 heads.add(rev)
1674 1670 try:
1675 1671 compression = ts / r.end(rev)
1676 1672 except ZeroDivisionError:
1677 1673 compression = 0
1678 1674 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
1679 1675 "%11d %5d %8d\n" %
1680 1676 (rev, p1, p2, r.start(rev), r.end(rev),
1681 1677 r.start(dbase), r.start(cbase),
1682 1678 r.start(p1), r.start(p2),
1683 1679 rs, ts, compression, len(heads), clen))
1684 1680 return 0
1685 1681
1686 1682 v = r.version
1687 1683 format = v & 0xFFFF
1688 1684 flags = []
1689 1685 gdelta = False
1690 1686 if v & revlog.FLAG_INLINE_DATA:
1691 1687 flags.append('inline')
1692 1688 if v & revlog.FLAG_GENERALDELTA:
1693 1689 gdelta = True
1694 1690 flags.append('generaldelta')
1695 1691 if not flags:
1696 1692 flags = ['(none)']
1697 1693
1698 1694 nummerges = 0
1699 1695 numfull = 0
1700 1696 numprev = 0
1701 1697 nump1 = 0
1702 1698 nump2 = 0
1703 1699 numother = 0
1704 1700 nump1prev = 0
1705 1701 nump2prev = 0
1706 1702 chainlengths = []
1707 1703
1708 1704 datasize = [None, 0, 0]
1709 1705 fullsize = [None, 0, 0]
1710 1706 deltasize = [None, 0, 0]
1711 1707 chunktypecounts = {}
1712 1708 chunktypesizes = {}
1713 1709
1714 1710 def addsize(size, l):
1715 1711 if l[0] is None or size < l[0]:
1716 1712 l[0] = size
1717 1713 if size > l[1]:
1718 1714 l[1] = size
1719 1715 l[2] += size
1720 1716
1721 1717 numrevs = len(r)
1722 1718 for rev in xrange(numrevs):
1723 1719 p1, p2 = r.parentrevs(rev)
1724 1720 delta = r.deltaparent(rev)
1725 1721 if format > 0:
1726 1722 addsize(r.rawsize(rev), datasize)
1727 1723 if p2 != nullrev:
1728 1724 nummerges += 1
1729 1725 size = r.length(rev)
1730 1726 if delta == nullrev:
1731 1727 chainlengths.append(0)
1732 1728 numfull += 1
1733 1729 addsize(size, fullsize)
1734 1730 else:
1735 1731 chainlengths.append(chainlengths[delta] + 1)
1736 1732 addsize(size, deltasize)
1737 1733 if delta == rev - 1:
1738 1734 numprev += 1
1739 1735 if delta == p1:
1740 1736 nump1prev += 1
1741 1737 elif delta == p2:
1742 1738 nump2prev += 1
1743 1739 elif delta == p1:
1744 1740 nump1 += 1
1745 1741 elif delta == p2:
1746 1742 nump2 += 1
1747 1743 elif delta != nullrev:
1748 1744 numother += 1
1749 1745
1750 1746 # Obtain data on the raw chunks in the revlog.
1751 1747 segment = r._getsegmentforrevs(rev, rev)[1]
1752 1748 if segment:
1753 1749 chunktype = segment[0]
1754 1750 else:
1755 1751 chunktype = 'empty'
1756 1752
1757 1753 if chunktype not in chunktypecounts:
1758 1754 chunktypecounts[chunktype] = 0
1759 1755 chunktypesizes[chunktype] = 0
1760 1756
1761 1757 chunktypecounts[chunktype] += 1
1762 1758 chunktypesizes[chunktype] += size
1763 1759
1764 1760 # Adjust size min value for empty cases
1765 1761 for size in (datasize, fullsize, deltasize):
1766 1762 if size[0] is None:
1767 1763 size[0] = 0
1768 1764
1769 1765 numdeltas = numrevs - numfull
1770 1766 numoprev = numprev - nump1prev - nump2prev
1771 1767 totalrawsize = datasize[2]
1772 1768 datasize[2] /= numrevs
1773 1769 fulltotal = fullsize[2]
1774 1770 fullsize[2] /= numfull
1775 1771 deltatotal = deltasize[2]
1776 1772 if numrevs - numfull > 0:
1777 1773 deltasize[2] /= numrevs - numfull
1778 1774 totalsize = fulltotal + deltatotal
1779 1775 avgchainlen = sum(chainlengths) / numrevs
1780 1776 maxchainlen = max(chainlengths)
1781 1777 compratio = 1
1782 1778 if totalsize:
1783 1779 compratio = totalrawsize / totalsize
1784 1780
1785 1781 basedfmtstr = '%%%dd\n'
1786 1782 basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
1787 1783
1788 1784 def dfmtstr(max):
1789 1785 return basedfmtstr % len(str(max))
1790 1786 def pcfmtstr(max, padding=0):
1791 1787 return basepcfmtstr % (len(str(max)), ' ' * padding)
1792 1788
1793 1789 def pcfmt(value, total):
1794 1790 if total:
1795 1791 return (value, 100 * float(value) / total)
1796 1792 else:
1797 1793 return value, 100.0
1798 1794
1799 1795 ui.write(('format : %d\n') % format)
1800 1796 ui.write(('flags : %s\n') % ', '.join(flags))
1801 1797
1802 1798 ui.write('\n')
1803 1799 fmt = pcfmtstr(totalsize)
1804 1800 fmt2 = dfmtstr(totalsize)
1805 1801 ui.write(('revisions : ') + fmt2 % numrevs)
1806 1802 ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs))
1807 1803 ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
1808 1804 ui.write(('revisions : ') + fmt2 % numrevs)
1809 1805 ui.write((' full : ') + fmt % pcfmt(numfull, numrevs))
1810 1806 ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs))
1811 1807 ui.write(('revision size : ') + fmt2 % totalsize)
1812 1808 ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize))
1813 1809 ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize))
1814 1810
1815 1811 def fmtchunktype(chunktype):
1816 1812 if chunktype == 'empty':
1817 1813 return ' %s : ' % chunktype
1818 1814 elif chunktype in string.ascii_letters:
1819 1815 return ' 0x%s (%s) : ' % (hex(chunktype), chunktype)
1820 1816 else:
1821 1817 return ' 0x%s : ' % hex(chunktype)
1822 1818
1823 1819 ui.write('\n')
1824 1820 ui.write(('chunks : ') + fmt2 % numrevs)
1825 1821 for chunktype in sorted(chunktypecounts):
1826 1822 ui.write(fmtchunktype(chunktype))
1827 1823 ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
1828 1824 ui.write(('chunks size : ') + fmt2 % totalsize)
1829 1825 for chunktype in sorted(chunktypecounts):
1830 1826 ui.write(fmtchunktype(chunktype))
1831 1827 ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))
1832 1828
1833 1829 ui.write('\n')
1834 1830 fmt = dfmtstr(max(avgchainlen, compratio))
1835 1831 ui.write(('avg chain length : ') + fmt % avgchainlen)
1836 1832 ui.write(('max chain length : ') + fmt % maxchainlen)
1837 1833 ui.write(('compression ratio : ') + fmt % compratio)
1838 1834
1839 1835 if format > 0:
1840 1836 ui.write('\n')
1841 1837 ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
1842 1838 % tuple(datasize))
1843 1839 ui.write(('full revision size (min/max/avg) : %d / %d / %d\n')
1844 1840 % tuple(fullsize))
1845 1841 ui.write(('delta size (min/max/avg) : %d / %d / %d\n')
1846 1842 % tuple(deltasize))
1847 1843
1848 1844 if numdeltas > 0:
1849 1845 ui.write('\n')
1850 1846 fmt = pcfmtstr(numdeltas)
1851 1847 fmt2 = pcfmtstr(numdeltas, 4)
1852 1848 ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas))
1853 1849 if numprev > 0:
1854 1850 ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev,
1855 1851 numprev))
1856 1852 ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev,
1857 1853 numprev))
1858 1854 ui.write((' other : ') + fmt2 % pcfmt(numoprev,
1859 1855 numprev))
1860 1856 if gdelta:
1861 1857 ui.write(('deltas against p1 : ')
1862 1858 + fmt % pcfmt(nump1, numdeltas))
1863 1859 ui.write(('deltas against p2 : ')
1864 1860 + fmt % pcfmt(nump2, numdeltas))
1865 1861 ui.write(('deltas against other : ') + fmt % pcfmt(numother,
1866 1862 numdeltas))
1867 1863
1868 1864 @command('debugrevspec',
1869 1865 [('', 'optimize', None,
1870 1866 _('print parsed tree after optimizing (DEPRECATED)')),
1871 1867 ('p', 'show-stage', [],
1872 1868 _('print parsed tree at the given stage'), _('NAME')),
1873 1869 ('', 'no-optimized', False, _('evaluate tree without optimization')),
1874 1870 ('', 'verify-optimized', False, _('verify optimized result')),
1875 1871 ],
1876 1872 ('REVSPEC'))
1877 1873 def debugrevspec(ui, repo, expr, **opts):
1878 1874 """parse and apply a revision specification
1879 1875
1880 1876 Use -p/--show-stage option to print the parsed tree at the given stages.
1881 1877 Use -p all to print tree at every stage.
1882 1878
1883 1879 Use --verify-optimized to compare the optimized result with the unoptimized
1884 1880 one. Returns 1 if the optimized result differs.
1885 1881 """
1886 1882 stages = [
1887 1883 ('parsed', lambda tree: tree),
1888 1884 ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)),
1889 1885 ('concatenated', revsetlang.foldconcat),
1890 1886 ('analyzed', revsetlang.analyze),
1891 1887 ('optimized', revsetlang.optimize),
1892 1888 ]
1893 1889 if opts['no_optimized']:
1894 1890 stages = stages[:-1]
1895 1891 if opts['verify_optimized'] and opts['no_optimized']:
1896 1892 raise error.Abort(_('cannot use --verify-optimized with '
1897 1893 '--no-optimized'))
1898 1894 stagenames = set(n for n, f in stages)
1899 1895
1900 1896 showalways = set()
1901 1897 showchanged = set()
1902 1898 if ui.verbose and not opts['show_stage']:
1903 1899 # show parsed tree by --verbose (deprecated)
1904 1900 showalways.add('parsed')
1905 1901 showchanged.update(['expanded', 'concatenated'])
1906 1902 if opts['optimize']:
1907 1903 showalways.add('optimized')
1908 1904 if opts['show_stage'] and opts['optimize']:
1909 1905 raise error.Abort(_('cannot use --optimize with --show-stage'))
1910 1906 if opts['show_stage'] == ['all']:
1911 1907 showalways.update(stagenames)
1912 1908 else:
1913 1909 for n in opts['show_stage']:
1914 1910 if n not in stagenames:
1915 1911 raise error.Abort(_('invalid stage name: %s') % n)
1916 1912 showalways.update(opts['show_stage'])
1917 1913
1918 1914 treebystage = {}
1919 1915 printedtree = None
1920 1916 tree = revsetlang.parse(expr, lookup=repo.__contains__)
1921 1917 for n, f in stages:
1922 1918 treebystage[n] = tree = f(tree)
1923 1919 if n in showalways or (n in showchanged and tree != printedtree):
1924 1920 if opts['show_stage'] or n != 'parsed':
1925 1921 ui.write(("* %s:\n") % n)
1926 1922 ui.write(revsetlang.prettyformat(tree), "\n")
1927 1923 printedtree = tree
1928 1924
1929 1925 if opts['verify_optimized']:
1930 1926 arevs = revset.makematcher(treebystage['analyzed'])(repo)
1931 1927 brevs = revset.makematcher(treebystage['optimized'])(repo)
1932 1928 if ui.verbose:
1933 1929 ui.note(("* analyzed set:\n"), smartset.prettyformat(arevs), "\n")
1934 1930 ui.note(("* optimized set:\n"), smartset.prettyformat(brevs), "\n")
1935 1931 arevs = list(arevs)
1936 1932 brevs = list(brevs)
1937 1933 if arevs == brevs:
1938 1934 return 0
1939 1935 ui.write(('--- analyzed\n'), label='diff.file_a')
1940 1936 ui.write(('+++ optimized\n'), label='diff.file_b')
1941 1937 sm = difflib.SequenceMatcher(None, arevs, brevs)
1942 1938 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1943 1939 if tag in ('delete', 'replace'):
1944 1940 for c in arevs[alo:ahi]:
1945 1941 ui.write('-%s\n' % c, label='diff.deleted')
1946 1942 if tag in ('insert', 'replace'):
1947 1943 for c in brevs[blo:bhi]:
1948 1944 ui.write('+%s\n' % c, label='diff.inserted')
1949 1945 if tag == 'equal':
1950 1946 for c in arevs[alo:ahi]:
1951 1947 ui.write(' %s\n' % c)
1952 1948 return 1
1953 1949
1954 1950 func = revset.makematcher(tree)
1955 1951 revs = func(repo)
1956 1952 if ui.verbose:
1957 1953 ui.note(("* set:\n"), smartset.prettyformat(revs), "\n")
1958 1954 for c in revs:
1959 1955 ui.write("%s\n" % c)
1960 1956
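# Usage sketch: -p all prints the tree after every stage registered above
# (parsed, expanded, concatenated, analyzed, optimized) before the
# resulting revisions; the tree rendering below is illustrative:
#
#   $ hg debugrevspec -p all '::2'
#   * parsed:
#   (dagrangepre
#     ('symbol', '2'))
#   ...
#   0
#   1
#   2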
1961 1957 @command('debugsetparents', [], _('REV1 [REV2]'))
1962 1958 def debugsetparents(ui, repo, rev1, rev2=None):
1963 1959 """manually set the parents of the current working directory
1964 1960
1965 1961 This is useful for writing repository conversion tools, but should
1966 1962 be used with care. For example, neither the working directory nor the
1967 1963 dirstate is updated, so file status may be incorrect after running this
1968 1964 command.
1969 1965
1970 1966 Returns 0 on success.
1971 1967 """
1972 1968
1973 1969 r1 = scmutil.revsingle(repo, rev1).node()
1974 1970 r2 = scmutil.revsingle(repo, rev2, 'null').node()
1975 1971
1976 1972 with repo.wlock():
1977 1973 repo.setparents(r1, r2)
1978 1974
1979 1975 @command('debugsub',
1980 1976 [('r', 'rev', '',
1981 1977 _('revision to check'), _('REV'))],
1982 1978 _('[-r REV] [REV]'))
1983 1979 def debugsub(ui, repo, rev=None):
1984 1980 ctx = scmutil.revsingle(repo, rev, None)
1985 1981 for k, v in sorted(ctx.substate.items()):
1986 1982 ui.write(('path %s\n') % k)
1987 1983 ui.write((' source %s\n') % v[0])
1988 1984 ui.write((' revision %s\n') % v[1])
1989 1985
1990 1986 @command('debugsuccessorssets',
1991 1987 [],
1992 1988 _('[REV]'))
1993 1989 def debugsuccessorssets(ui, repo, *revs):
1994 1990 """show set of successors for revision
1995 1991
1996 1992 A successors set of changeset A is a consistent group of revisions that
1997 1993 succeed A. It contains non-obsolete changesets only.
1998 1994
1999 1995 In most cases a changeset A has a single successors set containing a single
2000 1996 successor (changeset A replaced by A').
2001 1997
2002 1998 A changeset that is made obsolete with no successors is called "pruned".
2003 1999 Such changesets have no successors sets at all.
2004 2000
2005 2001 A changeset that has been "split" will have a successors set containing
2006 2002 more than one successor.
2007 2003
2008 2004 A changeset that has been rewritten in multiple different ways is called
2009 2005 "divergent". Such changesets have multiple successor sets (each of which
2010 2006 may also be split, i.e. have multiple successors).
2011 2007
2012 2008 Results are displayed as follows::
2013 2009
2014 2010 <rev1>
2015 2011 <successors-1A>
2016 2012 <rev2>
2017 2013 <successors-2A>
2018 2014 <successors-2B1> <successors-2B2> <successors-2B3>
2019 2015
2020 2016 Here rev2 has two possible (i.e. divergent) successors sets. The first
2021 2017 holds one element, whereas the second holds three (i.e. the changeset has
2022 2018 been split).
2023 2019 """
2024 2020 # passed to successorssets caching computation from one call to another
2025 2021 cache = {}
2026 2022 ctx2str = str
2027 2023 node2str = short
2028 2024 if ui.debug():
2029 2025 def ctx2str(ctx):
2030 2026 return ctx.hex()
2031 2027 node2str = hex
2032 2028 for rev in scmutil.revrange(repo, revs):
2033 2029 ctx = repo[rev]
2034 2030 ui.write('%s\n' % ctx2str(ctx))
2035 2031 for succsset in obsolete.successorssets(repo, ctx.node(), cache):
2036 2032 if succsset:
2037 2033 ui.write(' ')
2038 2034 ui.write(node2str(succsset[0]))
2039 2035 for node in succsset[1:]:
2040 2036 ui.write(' ')
2041 2037 ui.write(node2str(node))
2042 2038 ui.write('\n')
2043 2039
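# Usage sketch matching the docstring's display format (hashes and the
# exact indentation are illustrative):
#
#   $ hg debugsuccessorssets 'obsolete()'
#   a1b2c3d4e5f6
#       f6e5d4c3b2a1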
2044 2040 @command('debugtemplate',
2045 2041 [('r', 'rev', [], _('apply template on changesets'), _('REV')),
2046 2042 ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
2047 2043 _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
2048 2044 optionalrepo=True)
2049 2045 def debugtemplate(ui, repo, tmpl, **opts):
2050 2046 """parse and apply a template
2051 2047
2052 2048 If -r/--rev is given, the template is processed as a log template and
2053 2049 applied to the given changesets. Otherwise, it is processed as a generic
2054 2050 template.
2055 2051
2056 2052 Use --verbose to print the parsed tree.
2057 2053 """
2058 2054 revs = None
2059 2055 if opts['rev']:
2060 2056 if repo is None:
2061 2057 raise error.RepoError(_('there is no Mercurial repository here '
2062 2058 '(.hg not found)'))
2063 2059 revs = scmutil.revrange(repo, opts['rev'])
2064 2060
2065 2061 props = {}
2066 2062 for d in opts['define']:
2067 2063 try:
2068 2064 k, v = (e.strip() for e in d.split('=', 1))
2069 2065 if not k or k == 'ui':
2070 2066 raise ValueError
2071 2067 props[k] = v
2072 2068 except ValueError:
2073 2069 raise error.Abort(_('malformed keyword definition: %s') % d)
2074 2070
2075 2071 if ui.verbose:
2076 2072 aliases = ui.configitems('templatealias')
2077 2073 tree = templater.parse(tmpl)
2078 2074 ui.note(templater.prettyformat(tree), '\n')
2079 2075 newtree = templater.expandaliases(tree, aliases)
2080 2076 if newtree != tree:
2081 2077 ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
2082 2078
2083 2079 mapfile = None
2084 2080 if revs is None:
2085 2081 k = 'debugtemplate'
2086 2082 t = formatter.maketemplater(ui, k, tmpl)
2087 2083 ui.write(templater.stringify(t(k, ui=ui, **props)))
2088 2084 else:
2089 2085 displayer = cmdutil.changeset_templater(ui, repo, None, opts, tmpl,
2090 2086 mapfile, buffered=False)
2091 2087 for r in revs:
2092 2088 displayer.show(repo[r], **props)
2093 2089 displayer.close()
2094 2090
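# Usage sketches: without -r the template is expanded generically; with
# -r it runs as a changeset template against each revision (the node
# below is a placeholder):
#
#   $ hg debugtemplate 'foo\n'
#   foo
#   $ hg debugtemplate -r 0 '{rev}:{node|short}\n'
#   0:<short-node>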
2095 2091 @command('debugupdatecaches', [])
2096 2092 def debugupdatecaches(ui, repo, *pats, **opts):
2097 2093 """warm all known caches in the repository"""
2098 2094 with repo.wlock():
2099 2095 with repo.lock():
2100 2096 repo.updatecaches()
2101 2097
2102 2098 @command('debugupgraderepo', [
2103 2099 ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
2104 2100 ('', 'run', False, _('performs an upgrade')),
2105 2101 ])
2106 2102 def debugupgraderepo(ui, repo, run=False, optimize=None):
2107 2103 """upgrade a repository to use different features
2108 2104
2109 2105 If no arguments are specified, the repository is evaluated for upgrade
2110 2106 and a list of problems and potential optimizations is printed.
2111 2107
2112 2108 With ``--run``, a repository upgrade is performed. Behavior of the upgrade
2113 2109 can be influenced via additional arguments. More details will be provided
2114 2110 by the command output when run without ``--run``.
2115 2111
2116 2112 During the upgrade, the repository will be locked and no writes will be
2117 2113 allowed.
2118 2114
2119 2115 At the end of the upgrade, the repository may not be readable while new
2120 2116 repository data is swapped in. This window will be as long as it takes to
2121 2117 rename some directories inside the ``.hg`` directory. On most machines, this
2122 2118 should complete almost instantaneously and the chances of a consumer being
2123 2119 unable to access the repository should be low.
2124 2120 """
2125 2121 return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
2126 2122
2127 2123 @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
2128 2124 inferrepo=True)
2129 2125 def debugwalk(ui, repo, *pats, **opts):
2130 2126 """show how files match on given patterns"""
2131 2127 m = scmutil.match(repo[None], pats, opts)
2132 2128 items = list(repo[None].walk(m))
2133 2129 if not items:
2134 2130 return
2135 2131 f = lambda fn: fn
2136 2132 if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
2137 2133 f = lambda fn: util.normpath(fn)
2138 2134 fmt = 'f %%-%ds %%-%ds %%s' % (
2139 2135 max([len(abs) for abs in items]),
2140 2136 max([len(m.rel(abs)) for abs in items]))
2141 2137 for abs in items:
2142 2138 line = fmt % (abs, f(m.rel(abs)), m.exact(abs) and 'exact' or '')
2143 2139 ui.write("%s\n" % line.rstrip())
2144 2140
2145 2141 @command('debugwireargs',
2146 2142 [('', 'three', '', 'three'),
2147 2143 ('', 'four', '', 'four'),
2148 2144 ('', 'five', '', 'five'),
2149 2145 ] + cmdutil.remoteopts,
2150 2146 _('REPO [OPTIONS]... [ONE [TWO]]'),
2151 2147 norepo=True)
2152 2148 def debugwireargs(ui, repopath, *vals, **opts):
2153 2149 repo = hg.peer(ui, opts, repopath)
2154 2150 for opt in cmdutil.remoteopts:
2155 2151 del opts[opt[1]]
2156 2152 args = {}
2157 2153 for k, v in opts.iteritems():
2158 2154 if v:
2159 2155 args[k] = v
2160 2156 # run twice to check that we don't mess up the stream for the next command
2161 2157 res1 = repo.debugwireargs(*vals, **args)
2162 2158 res2 = repo.debugwireargs(*vals, **args)
2163 2159 ui.write("%s\n" % res1)
2164 2160 if res1 != res2:
2165 2161 ui.warn("%s\n" % res2)
@@ -1,788 +1,796 b''
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import os
12 12 import re
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 error,
17 17 pathutil,
18 18 util,
19 19 )
20 20
21 21 propertycache = util.propertycache
22 22
23 23 def _rematcher(regex):
24 24 '''compile the regexp with the best available regexp engine and return a
25 25 matcher function'''
26 26 m = util.re.compile(regex)
27 27 try:
28 28 # slightly faster, provided by facebook's re2 bindings
29 29 return m.test_match
30 30 except AttributeError:
31 31 return m.match
32 32
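# A small sketch of how _rematcher's result is used: whichever engine is
# picked, callers get a plain callable that is truthy on a match.
#
#   m = _rematcher(r'foo/.*\.py$')
#   bool(m('foo/bar.py'))   # True
#   bool(m('foo/bar.txt'))  # False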
33 33 def _expandsets(kindpats, ctx, listsubrepos):
34 34 '''Returns the kindpats list with the 'set' patterns expanded.'''
35 35 fset = set()
36 36 other = []
37 37
38 38 for kind, pat, source in kindpats:
39 39 if kind == 'set':
40 40 if not ctx:
41 41 raise error.Abort(_("fileset expression with no context"))
42 42 s = ctx.getfileset(pat)
43 43 fset.update(s)
44 44
45 45 if listsubrepos:
46 46 for subpath in ctx.substate:
47 47 s = ctx.sub(subpath).getfileset(pat)
48 48 fset.update(subpath + '/' + f for f in s)
49 49
50 50 continue
51 51 other.append((kind, pat, source))
52 52 return fset, other
53 53
54 54 def _expandsubinclude(kindpats, root):
55 55 '''Returns the list of subinclude matcher args and the kindpats with
56 56 the subincludes removed.'''
57 57 relmatchers = []
58 58 other = []
59 59
60 60 for kind, pat, source in kindpats:
61 61 if kind == 'subinclude':
62 62 sourceroot = pathutil.dirname(util.normpath(source))
63 63 pat = util.pconvert(pat)
64 64 path = pathutil.join(sourceroot, pat)
65 65
66 66 newroot = pathutil.dirname(path)
67 67 matcherargs = (newroot, '', [], ['include:%s' % path])
68 68
69 69 prefix = pathutil.canonpath(root, root, newroot)
70 70 if prefix:
71 71 prefix += '/'
72 72 relmatchers.append((prefix, matcherargs))
73 73 else:
74 74 other.append((kind, pat, source))
75 75
76 76 return relmatchers, other
77 77
78 78 def _kindpatsalwaysmatch(kindpats):
79 79 """"Checks whether the kindspats match everything, as e.g.
80 80 'relpath:.' does.
81 81 """
82 82 for kind, pat, source in kindpats:
83 83 if pat != '' or kind not in ['relpath', 'glob']:
84 84 return False
85 85 return True
86 86
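# Illustrative calls (tuples follow the (kind, pat, source) shape used
# throughout this module):
#
#   _kindpatsalwaysmatch([('relpath', '', '')])   # True
#   _kindpatsalwaysmatch([('glob', '*.py', '')])  # False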
87 87 def match(root, cwd, patterns, include=None, exclude=None, default='glob',
88 88 exact=False, auditor=None, ctx=None, listsubrepos=False, warn=None,
89 89 badfn=None, icasefs=False):
90 90 """build an object to match a set of file patterns
91 91
92 92 arguments:
93 93 root - the canonical root of the tree you're matching against
94 94 cwd - the current working directory, if relevant
95 95 patterns - patterns to find
96 96 include - patterns to include (unless they are excluded)
97 97 exclude - patterns to exclude (even if they are included)
98 98 default - if a pattern in patterns has no explicit type, assume this one
99 99 exact - patterns are actually filenames (include/exclude still apply)
100 100 warn - optional function used for printing warnings
101 101 badfn - optional bad() callback for this matcher instead of the default
102 102 icasefs - make a matcher for wdir on case insensitive filesystems, which
103 103 normalizes the given patterns to the case in the filesystem
104 104
105 105 a pattern is one of:
106 106 'glob:<glob>' - a glob relative to cwd
107 107 're:<regexp>' - a regular expression
108 108 'path:<path>' - a path relative to repository root, which is matched
109 109 recursively
110 110 'rootfilesin:<path>' - a path relative to repository root, which is
111 111 matched non-recursively (will not match subdirectories)
112 112 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
113 113 'relpath:<path>' - a path relative to cwd
114 114 'relre:<regexp>' - a regexp that needn't match the start of a name
115 115 'set:<fileset>' - a fileset expression
116 116 'include:<path>' - a file of patterns to read and include
117 117 'subinclude:<path>' - a file of patterns to match against files under
118 118 the same directory
119 119 '<something>' - a pattern of the specified default type
120 120 """
121 121 normalize = _donormalize
122 122 if icasefs:
123 123 dirstate = ctx.repo().dirstate
124 124 dsnormalize = dirstate.normalize
125 125
126 126 def normalize(patterns, default, root, cwd, auditor, warn):
127 127 kp = _donormalize(patterns, default, root, cwd, auditor, warn)
128 128 kindpats = []
129 129 for kind, pats, source in kp:
130 130 if kind not in ('re', 'relre'): # regex can't be normalized
131 131 p = pats
132 132 pats = dsnormalize(pats)
133 133
134 134 # Preserve the original to handle a case-only rename.
135 135 if p != pats and p in dirstate:
136 136 kindpats.append((kind, p, source))
137 137
138 138 kindpats.append((kind, pats, source))
139 139 return kindpats
140 140
141 141 return matcher(root, cwd, normalize, patterns, include=include,
142 142 exclude=exclude, default=default, exact=exact,
143 143 auditor=auditor, ctx=ctx, listsubrepos=listsubrepos,
144 144 warn=warn, badfn=badfn)
145 145
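# A sketch of typical match() usage (root, pattern, and paths hypothetical):
#
# m = match('/repo', '', ['glob:src/*.c'])
# bool(m('src/main.c'))  # True
# bool(m('docs/readme')) # False
# m.files()              # ['src'] -- the non-glob root of the pattern
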
146 146 def exact(root, cwd, files, badfn=None):
147 147 return match(root, cwd, files, exact=True, badfn=badfn)
148 148
149 149 def always(root, cwd):
150 150 return match(root, cwd, [])
151 151
152 152 def badmatch(match, badfn):
153 153 """Make a copy of the given matcher, replacing its bad method with the given
154 154 one.
155 155 """
156 156 m = copy.copy(match)
157 157 m.bad = badfn
158 158 return m
159 159
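# A sketch of badmatch() usage -- collect missing files instead of letting
# the matcher's default bad() callback run (names hypothetical):
#
# missing = []
# m = badmatch(origmatcher, lambda f, msg: missing.append((f, msg)))
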
160 160 def _donormalize(patterns, default, root, cwd, auditor, warn):
161 161 '''Convert 'kind:pat' entries from the patterns list into (kind, pat,
162 162 source) tuples, with patterns normalized and rooted and listfiles expanded.'''
163 163 kindpats = []
164 164 for kind, pat in [_patsplit(p, default) for p in patterns]:
165 165 if kind in ('glob', 'relpath'):
166 166 pat = pathutil.canonpath(root, cwd, pat, auditor)
167 167 elif kind in ('relglob', 'path', 'rootfilesin'):
168 168 pat = util.normpath(pat)
169 169 elif kind in ('listfile', 'listfile0'):
170 170 try:
171 171 files = util.readfile(pat)
172 172 if kind == 'listfile0':
173 173 files = files.split('\0')
174 174 else:
175 175 files = files.splitlines()
176 176 files = [f for f in files if f]
177 177 except EnvironmentError:
178 178 raise error.Abort(_("unable to read file list (%s)") % pat)
179 179 for k, p, source in _donormalize(files, default, root, cwd,
180 180 auditor, warn):
181 181 kindpats.append((k, p, pat))
182 182 continue
183 183 elif kind == 'include':
184 184 try:
185 185 fullpath = os.path.join(root, util.localpath(pat))
186 186 includepats = readpatternfile(fullpath, warn)
187 187 for k, p, source in _donormalize(includepats, default,
188 188 root, cwd, auditor, warn):
189 189 kindpats.append((k, p, source or pat))
190 190 except error.Abort as inst:
191 191 raise error.Abort('%s: %s' % (pat, inst[0]))
192 192 except IOError as inst:
193 193 if warn:
194 194 warn(_("skipping unreadable pattern file '%s': %s\n") %
195 195 (pat, inst.strerror))
196 196 continue
197 197 # else: re or relre - which cannot be normalized
198 198 kindpats.append((kind, pat, ''))
199 199 return kindpats
200 200
201 201 class matcher(object):
202 202
203 203 def __init__(self, root, cwd, normalize, patterns, include=None,
204 204 exclude=None, default='glob', exact=False, auditor=None,
205 205 ctx=None, listsubrepos=False, warn=None, badfn=None):
206 206 if include is None:
207 207 include = []
208 208 if exclude is None:
209 209 exclude = []
210 210
211 211 self._root = root
212 212 self._cwd = cwd
213 213 self._files = [] # exact files and roots of patterns
214 214 self._anypats = bool(include or exclude)
215 215 self._always = False
216 216 self._pathrestricted = bool(include or exclude or patterns)
217 self.patternspat = None
218 self.includepat = None
219 self.excludepat = None
217 220
218 221 # roots are directories which are recursively included/excluded.
219 222 self._includeroots = set()
220 223 self._excluderoots = set()
221 224 # dirs are directories which are non-recursively included.
222 225 self._includedirs = set()
223 226
224 227 if badfn is not None:
225 228 self.bad = badfn
226 229
227 230 matchfns = []
228 231 if include:
229 232 kindpats = normalize(include, 'glob', root, cwd, auditor, warn)
230 233 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
231 234 listsubrepos, root)
232 235 roots, dirs = _rootsanddirs(kindpats)
233 236 self._includeroots.update(roots)
234 237 self._includedirs.update(dirs)
235 238 matchfns.append(im)
236 239 if exclude:
237 240 kindpats = normalize(exclude, 'glob', root, cwd, auditor, warn)
238 241 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
239 242 listsubrepos, root)
240 243 if not _anypats(kindpats):
241 244 # Only consider recursive excludes as such - if a non-recursive
242 245 # exclude is used, we must still recurse into the excluded
243 246 # directory, at least to find subdirectories. In such a case,
244 247 # the regex still won't match the non-recursively-excluded
245 248 # files.
246 249 self._excluderoots.update(_roots(kindpats))
247 250 matchfns.append(lambda f: not em(f))
248 251 if exact:
249 252 if isinstance(patterns, list):
250 253 self._files = patterns
251 254 else:
252 255 self._files = list(patterns)
253 256 matchfns.append(self.exact)
254 257 elif patterns:
255 258 kindpats = normalize(patterns, default, root, cwd, auditor, warn)
256 259 if not _kindpatsalwaysmatch(kindpats):
257 260 self._files = _explicitfiles(kindpats)
258 261 self._anypats = self._anypats or _anypats(kindpats)
259 262 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
260 263 listsubrepos, root)
261 264 matchfns.append(pm)
262 265
263 266 if not matchfns:
264 267 m = util.always
265 268 self._always = True
266 269 elif len(matchfns) == 1:
267 270 m = matchfns[0]
268 271 else:
269 272 def m(f):
270 273 for matchfn in matchfns:
271 274 if not matchfn(f):
272 275 return False
273 276 return True
274 277
275 278 self.matchfn = m
276 279
277 280 def __call__(self, fn):
278 281 return self.matchfn(fn)
279 282 def __iter__(self):
280 283 for f in self._files:
281 284 yield f
282 285
283 286 # Callbacks related to how the matcher is used by dirstate.walk.
284 287 # Subscribers to these events must monkeypatch the matcher object.
285 288 def bad(self, f, msg):
286 289 '''Callback from dirstate.walk for each explicit file that can't be
287 290 found/accessed, with an error message.'''
288 291 pass
289 292
290 293 # If an explicitdir is set, it will be called when an explicitly listed
291 294 # directory is visited.
292 295 explicitdir = None
293 296
294 297 # If a traversedir is set, it will be called when a directory discovered
295 298 # by recursive traversal is visited.
296 299 traversedir = None
297 300
298 301 def abs(self, f):
299 302 '''Convert a repo path back to path that is relative to the root of the
300 303 matcher.'''
301 304 return f
302 305
303 306 def rel(self, f):
304 307 '''Convert repo path back to path that is relative to cwd of matcher.'''
305 308 return util.pathto(self._root, self._cwd, f)
306 309
307 310 def uipath(self, f):
308 311 '''Convert repo path to a display path. If patterns or -I/-X were used
309 312 to create this matcher, the display path will be relative to cwd.
310 313 Otherwise it is relative to the root of the repo.'''
311 314 return (self._pathrestricted and self.rel(f)) or self.abs(f)
312 315
313 316 def files(self):
314 317 '''Explicitly listed files or patterns or roots:
315 318 if no patterns or .always(): empty list,
316 319 if exact: list exact files,
317 320 if not .anypats(): list all files and dirs,
318 321 else: optimal roots'''
319 322 return self._files
320 323
321 324 @propertycache
322 325 def _fileset(self):
323 326 return set(self._files)
324 327
325 328 @propertycache
326 329 def _dirs(self):
327 330 return set(util.dirs(self._fileset)) | {'.'}
328 331
329 332 def visitdir(self, dir):
330 333 '''Decides whether a directory should be visited based on whether it
331 334 has potential matches in it or one of its subdirectories. This is
332 335 based on the match's primary, included, and excluded patterns.
333 336
334 337 Returns the string 'all' if the given directory and all subdirectories
335 338 should be visited. Otherwise returns True or False indicating whether
336 339 the given directory should be visited.
337 340
338 341 This function's behavior is undefined if it has returned False for
339 342 one of the dir's parent directories.
340 343 '''
341 344 if self.prefix() and dir in self._fileset:
342 345 return 'all'
343 346 if dir in self._excluderoots:
344 347 return False
345 348 if ((self._includeroots or self._includedirs) and
346 349 '.' not in self._includeroots and
347 350 dir not in self._includeroots and
348 351 dir not in self._includedirs and
349 352 not any(parent in self._includeroots
350 353 for parent in util.finddirs(dir))):
351 354 return False
352 355 return (not self._fileset or
353 356 '.' in self._fileset or
354 357 dir in self._fileset or
355 358 dir in self._dirs or
356 359 any(parentdir in self._fileset
357 360 for parentdir in util.finddirs(dir)))
358 361
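# A sketch of visitdir() for a matcher built from 'path:foo/bar' (values
# follow the rules above; paths hypothetical):
#
# m = match('/repo', '', ['path:foo/bar'])
# m.visitdir('foo/bar')  # 'all' -- a prefix matcher at one of its roots
# m.visitdir('foo')      # True  -- parent of a root, must be visited
# m.visitdir('baz')      # False -- no potential matches inside
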
359 362 def exact(self, f):
360 363 '''Returns True if f is in .files().'''
361 364 return f in self._fileset
362 365
363 366 def anypats(self):
364 367 '''Matcher uses patterns or include/exclude.'''
365 368 return self._anypats
366 369
367 370 def always(self):
368 371 '''Matcher will match everything and .files() will be empty;
369 372 callers may be able to optimize based on this.'''
370 373 return self._always
371 374
372 375 def isexact(self):
373 376 return self.matchfn == self.exact
374 377
375 378 def prefix(self):
376 379 return not self.always() and not self.isexact() and not self.anypats()
377 380
381 def __repr__(self):
382 return ('<matcher files=%r, patterns=%r, includes=%r, excludes=%r>' %
383 (self._files, self.patternspat, self.includepat,
384 self.excludepat))
385
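# With the new __repr__(), matcher instances can be printed directly when
# debugging; 'hg debugignore' (see the test change below) now shows e.g.:
#
# <matcher files=[], patterns=None, includes='(?:(?:|.*/)[^/]*(?:/|$))', excludes=None>
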
378 386 class subdirmatcher(matcher):
379 387 """Adapt a matcher to work on a subdirectory only.
380 388
381 389 The paths are remapped to remove/insert the path as needed:
382 390
383 391 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
384 392 >>> m2 = subdirmatcher('sub', m1)
385 393 >>> bool(m2('a.txt'))
386 394 False
387 395 >>> bool(m2('b.txt'))
388 396 True
389 397 >>> bool(m2.matchfn('a.txt'))
390 398 False
391 399 >>> bool(m2.matchfn('b.txt'))
392 400 True
393 401 >>> m2.files()
394 402 ['b.txt']
395 403 >>> m2.exact('b.txt')
396 404 True
397 405 >>> util.pconvert(m2.rel('b.txt'))
398 406 'sub/b.txt'
399 407 >>> def bad(f, msg):
400 408 ... print "%s: %s" % (f, msg)
401 409 >>> m1.bad = bad
402 410 >>> m2.bad('x.txt', 'No such file')
403 411 sub/x.txt: No such file
404 412 >>> m2.abs('c.txt')
405 413 'sub/c.txt'
406 414 """
407 415
408 416 def __init__(self, path, matcher):
409 417 self._root = matcher._root
410 418 self._cwd = matcher._cwd
411 419 self._path = path
412 420 self._matcher = matcher
413 421 self._always = matcher._always
414 422
415 423 self._files = [f[len(path) + 1:] for f in matcher._files
416 424 if f.startswith(path + "/")]
417 425
418 426 # If the parent repo had a path to this subrepo and the matcher is
419 427 # a prefix matcher, this submatcher always matches.
420 428 if matcher.prefix():
421 429 self._always = any(f == path for f in matcher._files)
422 430
423 431 self._anypats = matcher._anypats
424 432 # Some information is lost in the superclass's constructor, so we
425 433 # cannot accurately create the matching function for the subdirectory
426 434 # from the inputs. Instead, we override matchfn() and visitdir() to
427 435 # call the original matcher with the subdirectory path prepended.
428 436 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
429 437
430 438 def bad(self, f, msg):
431 439 self._matcher.bad(self._path + "/" + f, msg)
432 440
433 441 def abs(self, f):
434 442 return self._matcher.abs(self._path + "/" + f)
435 443
436 444 def rel(self, f):
437 445 return self._matcher.rel(self._path + "/" + f)
438 446
439 447 def uipath(self, f):
440 448 return self._matcher.uipath(self._path + "/" + f)
441 449
442 450 def visitdir(self, dir):
443 451 if dir == '.':
444 452 dir = self._path
445 453 else:
446 454 dir = self._path + "/" + dir
447 455 return self._matcher.visitdir(dir)
448 456
449 457 def patkind(pattern, default=None):
450 458 '''If pattern is 'kind:pat' with a known kind, return kind.'''
451 459 return _patsplit(pattern, default)[0]
452 460
453 461 def _patsplit(pattern, default):
454 462 """Split a string into the optional pattern kind prefix and the actual
455 463 pattern."""
456 464 if ':' in pattern:
457 465 kind, pat = pattern.split(':', 1)
458 466 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
459 467 'listfile', 'listfile0', 'set', 'include', 'subinclude',
460 468 'rootfilesin'):
461 469 return kind, pat
462 470 return default, pattern
463 471
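# Examples (a sketch of _patsplit() behavior with default='glob'):
#
# _patsplit('re:.*\.c', 'glob')   # ('re', '.*\.c')
# _patsplit('main.c', 'glob')     # ('glob', 'main.c')
# _patsplit('unknown:x', 'glob')  # ('glob', 'unknown:x') -- unknown kind
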
464 472 def _globre(pat):
465 473 r'''Convert an extended glob string to a regexp string.
466 474
467 475 >>> print _globre(r'?')
468 476 .
469 477 >>> print _globre(r'*')
470 478 [^/]*
471 479 >>> print _globre(r'**')
472 480 .*
473 481 >>> print _globre(r'**/a')
474 482 (?:.*/)?a
475 483 >>> print _globre(r'a/**/b')
476 484 a\/(?:.*/)?b
477 485 >>> print _globre(r'[a*?!^][^b][!c]')
478 486 [a*?!^][\^b][^c]
479 487 >>> print _globre(r'{a,b}')
480 488 (?:a|b)
481 489 >>> print _globre(r'.\*\?')
482 490 \.\*\?
483 491 '''
484 492 i, n = 0, len(pat)
485 493 res = ''
486 494 group = 0
487 495 escape = util.re.escape
488 496 def peek():
489 497 return i < n and pat[i:i + 1]
490 498 while i < n:
491 499 c = pat[i:i + 1]
492 500 i += 1
493 501 if c not in '*?[{},\\':
494 502 res += escape(c)
495 503 elif c == '*':
496 504 if peek() == '*':
497 505 i += 1
498 506 if peek() == '/':
499 507 i += 1
500 508 res += '(?:.*/)?'
501 509 else:
502 510 res += '.*'
503 511 else:
504 512 res += '[^/]*'
505 513 elif c == '?':
506 514 res += '.'
507 515 elif c == '[':
508 516 j = i
509 517 if j < n and pat[j:j + 1] in '!]':
510 518 j += 1
511 519 while j < n and pat[j:j + 1] != ']':
512 520 j += 1
513 521 if j >= n:
514 522 res += '\\['
515 523 else:
516 524 stuff = pat[i:j].replace('\\','\\\\')
517 525 i = j + 1
518 526 if stuff[0:1] == '!':
519 527 stuff = '^' + stuff[1:]
520 528 elif stuff[0:1] == '^':
521 529 stuff = '\\' + stuff
522 530 res = '%s[%s]' % (res, stuff)
523 531 elif c == '{':
524 532 group += 1
525 533 res += '(?:'
526 534 elif c == '}' and group:
527 535 res += ')'
528 536 group -= 1
529 537 elif c == ',' and group:
530 538 res += '|'
531 539 elif c == '\\':
532 540 p = peek()
533 541 if p:
534 542 i += 1
535 543 res += escape(p)
536 544 else:
537 545 res += escape(c)
538 546 else:
539 547 res += escape(c)
540 548 return res
541 549
542 550 def _regex(kind, pat, globsuffix):
543 551 '''Convert a (normalized) pattern of any kind into a regular expression.
544 552 globsuffix is appended to the regexp of globs.'''
545 553 if not pat:
546 554 return ''
547 555 if kind == 're':
548 556 return pat
549 557 if kind == 'path':
550 558 if pat == '.':
551 559 return ''
552 560 return '^' + util.re.escape(pat) + '(?:/|$)'
553 561 if kind == 'rootfilesin':
554 562 if pat == '.':
555 563 escaped = ''
556 564 else:
557 565 # Pattern is a directory name.
558 566 escaped = util.re.escape(pat) + '/'
559 567 # Anything after the pattern must be a non-directory.
560 568 return '^' + escaped + '[^/]+$'
561 569 if kind == 'relglob':
562 570 return '(?:|.*/)' + _globre(pat) + globsuffix
563 571 if kind == 'relpath':
564 572 return util.re.escape(pat) + '(?:/|$)'
565 573 if kind == 'relre':
566 574 if pat.startswith('^'):
567 575 return pat
568 576 return '.*' + pat
569 577 return _globre(pat) + globsuffix
570 578
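# A few illustrative conversions (globsuffix shown here as '$'):
#
# _regex('path', 'docs', '$')        # '^docs(?:/|$)' -- recursive
# _regex('rootfilesin', 'docs', '$') # '^docs/[^/]+$' -- non-recursive
# _regex('relglob', '*.c', '$')      # '(?:|.*/)[^/]*\.c$' -- unrooted
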
571 579 def _buildmatch(ctx, kindpats, globsuffix, listsubrepos, root):
572 580 '''Return regexp string and a matcher function for kindpats.
573 581 globsuffix is appended to the regexp of globs.'''
574 582 matchfuncs = []
575 583
576 584 subincludes, kindpats = _expandsubinclude(kindpats, root)
577 585 if subincludes:
578 586 submatchers = {}
579 587 def matchsubinclude(f):
580 588 for prefix, matcherargs in subincludes:
581 589 if f.startswith(prefix):
582 590 mf = submatchers.get(prefix)
583 591 if mf is None:
584 592 mf = match(*matcherargs)
585 593 submatchers[prefix] = mf
586 594
587 595 if mf(f[len(prefix):]):
588 596 return True
589 597 return False
590 598 matchfuncs.append(matchsubinclude)
591 599
592 600 fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
593 601 if fset:
594 602 matchfuncs.append(fset.__contains__)
595 603
596 604 regex = ''
597 605 if kindpats:
598 606 regex, mf = _buildregexmatch(kindpats, globsuffix)
599 607 matchfuncs.append(mf)
600 608
601 609 if len(matchfuncs) == 1:
602 610 return regex, matchfuncs[0]
603 611 else:
604 612 return regex, lambda f: any(mf(f) for mf in matchfuncs)
605 613
606 614 def _buildregexmatch(kindpats, globsuffix):
607 615 """Build a match function from a list of kinds and kindpats,
608 616 return regexp string and a matcher function."""
609 617 try:
610 618 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
611 619 for (k, p, s) in kindpats])
612 620 if len(regex) > 20000:
613 621 raise OverflowError
614 622 return regex, _rematcher(regex)
615 623 except OverflowError:
616 624 # We're using a Python with a tiny regex engine and we
617 625 # made it explode, so we'll divide the pattern list in two
618 626 # until it works
619 627 l = len(kindpats)
620 628 if l < 2:
621 629 raise
622 630 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
623 631 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
624 632 return regex, lambda s: a(s) or b(s)
625 633 except re.error:
626 634 for k, p, s in kindpats:
627 635 try:
628 636 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
629 637 except re.error:
630 638 if s:
631 639 raise error.Abort(_("%s: invalid pattern (%s): %s") %
632 640 (s, k, p))
633 641 else:
634 642 raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
635 643 raise error.Abort(_("invalid pattern"))
636 644
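# A sketch of _buildregexmatch() combining two kinds into one regex:
#
# regex, mf = _buildregexmatch([('glob', '*.c', ''), ('path', 'docs', '')], '$')
# regex                       # '(?:[^/]*\.c$|^docs(?:/|$))'
# bool(mf('main.c'))          # True
# bool(mf('docs/index.txt'))  # True
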
637 645 def _patternrootsanddirs(kindpats):
638 646 '''Returns roots and directories corresponding to each pattern.
639 647
640 648 This calculates the roots and directories exactly matching the patterns and
641 649 returns a tuple of (roots, dirs) for each. It does not return other
642 650 directories which may also need to be considered, like the parent
643 651 directories.
644 652 '''
645 653 r = []
646 654 d = []
647 655 for kind, pat, source in kindpats:
648 656 if kind == 'glob': # find the non-glob prefix
649 657 root = []
650 658 for p in pat.split('/'):
651 659 if '[' in p or '{' in p or '*' in p or '?' in p:
652 660 break
653 661 root.append(p)
654 662 r.append('/'.join(root) or '.')
655 663 elif kind in ('relpath', 'path'):
656 664 r.append(pat or '.')
657 665 elif kind in ('rootfilesin',):
658 666 d.append(pat or '.')
659 667 else: # relglob, re, relre
660 668 r.append('.')
661 669 return r, d
662 670
663 671 def _roots(kindpats):
664 672 '''Returns root directories to match recursively from the given patterns.'''
665 673 roots, dirs = _patternrootsanddirs(kindpats)
666 674 return roots
667 675
668 676 def _rootsanddirs(kindpats):
669 677 '''Returns roots and exact directories from patterns.
670 678
671 679 roots are directories to match recursively, whereas exact directories should
672 680 be matched non-recursively. The returned (roots, dirs) tuple will also
673 681 include directories that need to be implicitly considered as either, such as
674 682 parent directories.
675 683
676 684 >>> _rootsanddirs(\
677 685 [('glob', 'g/h/*', ''), ('glob', 'g/h', ''), ('glob', 'g*', '')])
678 686 (['g/h', 'g/h', '.'], ['g', '.'])
679 687 >>> _rootsanddirs(\
680 688 [('rootfilesin', 'g/h', ''), ('rootfilesin', '', '')])
681 689 ([], ['g/h', '.', 'g', '.'])
682 690 >>> _rootsanddirs(\
683 691 [('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
684 692 (['r', 'p/p', '.'], ['p', '.'])
685 693 >>> _rootsanddirs(\
686 694 [('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
687 695 (['.', '.', '.'], ['.'])
688 696 '''
689 697 r, d = _patternrootsanddirs(kindpats)
690 698
691 699 # Append the parents as non-recursive/exact directories, since they must be
692 700 # scanned to get to either the roots or the other exact directories.
693 701 d.extend(util.dirs(d))
694 702 d.extend(util.dirs(r))
695 703 # util.dirs() does not include the root directory, so add it manually
696 704 d.append('.')
697 705
698 706 return r, d
699 707
700 708 def _explicitfiles(kindpats):
701 709 '''Returns the potential explicit filenames from the patterns.
702 710
703 711 >>> _explicitfiles([('path', 'foo/bar', '')])
704 712 ['foo/bar']
705 713 >>> _explicitfiles([('rootfilesin', 'foo/bar', '')])
706 714 []
707 715 '''
708 716 # Keep only the pattern kinds where one can specify filenames (vs only
709 717 # directory names).
710 718 filable = [kp for kp in kindpats if kp[0] not in ('rootfilesin',)]
711 719 return _roots(filable)
712 720
713 721 def _anypats(kindpats):
714 722 for kind, pat, source in kindpats:
715 723 if kind in ('glob', 're', 'relglob', 'relre', 'set', 'rootfilesin'):
716 724 return True
717 725
718 726 _commentre = None
719 727
720 728 def readpatternfile(filepath, warn, sourceinfo=False):
721 729 '''parse a pattern file, returning a list of
722 730 patterns. These patterns should be given to match()
723 731 to be validated and converted into a match function.
724 732
725 733 trailing white space is dropped.
726 734 the escape character is backslash.
727 735 comments start with #.
728 736 empty lines are skipped.
729 737
730 738 lines can be of the following formats:
731 739
732 740 syntax: regexp # defaults following lines to non-rooted regexps
733 741 syntax: glob # defaults following lines to non-rooted globs
734 742 re:pattern # non-rooted regular expression
735 743 glob:pattern # non-rooted glob
736 744 pattern # pattern of the current default type
737 745
738 746 if sourceinfo is set, returns a list of tuples:
739 747 (pattern, lineno, originalline). This is useful to debug ignore patterns.
740 748 '''
741 749
742 750 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
743 751 'include': 'include', 'subinclude': 'subinclude'}
744 752 syntax = 'relre:'
745 753 patterns = []
746 754
747 755 fp = open(filepath, 'rb')
748 756 for lineno, line in enumerate(util.iterfile(fp), start=1):
749 757 if "#" in line:
750 758 global _commentre
751 759 if not _commentre:
752 760 _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
753 761 # remove comments prefixed by an even number of escapes
754 762 m = _commentre.search(line)
755 763 if m:
756 764 line = line[:m.end(1)]
757 765 # fixup properly escaped comments that survived the above
758 766 line = line.replace("\\#", "#")
759 767 line = line.rstrip()
760 768 if not line:
761 769 continue
762 770
763 771 if line.startswith('syntax:'):
764 772 s = line[7:].strip()
765 773 try:
766 774 syntax = syntaxes[s]
767 775 except KeyError:
768 776 if warn:
769 777 warn(_("%s: ignoring invalid syntax '%s'\n") %
770 778 (filepath, s))
771 779 continue
772 780
773 781 linesyntax = syntax
774 782 for s, rels in syntaxes.iteritems():
775 783 if line.startswith(rels):
776 784 linesyntax = rels
777 785 line = line[len(rels):]
778 786 break
779 787 elif line.startswith(s+':'):
780 788 linesyntax = rels
781 789 line = line[len(s) + 1:]
782 790 break
783 791 if sourceinfo:
784 792 patterns.append((linesyntax + line, lineno, line))
785 793 else:
786 794 patterns.append(linesyntax + line)
787 795 fp.close()
788 796 return patterns
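# A sketch of readpatternfile() output. Given a (hypothetical) pattern file:
#
#   syntax: glob
#   *.o
#   re:^build/
#
# it returns ['relglob:*.o', 'relre:^build/'] -- the 'syntax:' line changes
# the default for following lines, and an explicit 're:' maps to the
# unrooted 'relre:' kind.
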
@@ -1,301 +1,301 b''
1 1 $ hg init ignorerepo
2 2 $ cd ignorerepo
3 3
4 4 Issue562: .hgignore requires newline at end:
5 5
6 6 $ touch foo
7 7 $ touch bar
8 8 $ touch baz
9 9 $ cat > makeignore.py <<EOF
10 10 > f = open(".hgignore", "w")
11 11 > f.write("ignore\n")
12 12 > f.write("foo\n")
13 13 > # No EOL here
14 14 > f.write("bar")
15 15 > f.close()
16 16 > EOF
17 17
18 18 $ python makeignore.py
19 19
20 20 Should display baz only:
21 21
22 22 $ hg status
23 23 ? baz
24 24
25 25 $ rm foo bar baz .hgignore makeignore.py
26 26
27 27 $ touch a.o
28 28 $ touch a.c
29 29 $ touch syntax
30 30 $ mkdir dir
31 31 $ touch dir/a.o
32 32 $ touch dir/b.o
33 33 $ touch dir/c.o
34 34
35 35 $ hg add dir/a.o
36 36 $ hg commit -m 0
37 37 $ hg add dir/b.o
38 38
39 39 $ hg status
40 40 A dir/b.o
41 41 ? a.c
42 42 ? a.o
43 43 ? dir/c.o
44 44 ? syntax
45 45
46 46 $ echo "*.o" > .hgignore
47 47 $ hg status
48 48 abort: $TESTTMP/ignorerepo/.hgignore: invalid pattern (relre): *.o (glob)
49 49 [255]
50 50
51 51 $ echo ".*\.o" > .hgignore
52 52 $ hg status
53 53 A dir/b.o
54 54 ? .hgignore
55 55 ? a.c
56 56 ? syntax
57 57
58 58 Ensure that comments work:
59 59
60 60 $ touch 'foo#bar' 'quux#'
61 61 #if no-windows
62 62 $ touch 'baz\#wat'
63 63 #endif
64 64 $ cat <<'EOF' >> .hgignore
65 65 > # full-line comment
66 66 > # whitespace-only comment line
67 67 > syntax# pattern, no whitespace, then comment
68 68 > a.c # pattern, then whitespace, then comment
69 69 > baz\\# # escaped comment character
70 70 > foo\#b # escaped comment character
71 71 > quux\## escaped comment character at end of name
72 72 > EOF
73 73 $ hg status
74 74 A dir/b.o
75 75 ? .hgignore
76 76 $ rm 'foo#bar' 'quux#'
77 77 #if no-windows
78 78 $ rm 'baz\#wat'
79 79 #endif
80 80
81 81 Check it does not ignore the current directory '.':
82 82
83 83 $ echo "^\." > .hgignore
84 84 $ hg status
85 85 A dir/b.o
86 86 ? a.c
87 87 ? a.o
88 88 ? dir/c.o
89 89 ? syntax
90 90
91 91 Test that patterns from ui.ignore options are read:
92 92
93 93 $ echo > .hgignore
94 94 $ cat >> $HGRCPATH << EOF
95 95 > [ui]
96 96 > ignore.other = $TESTTMP/ignorerepo/.hg/testhgignore
97 97 > EOF
98 98 $ echo "glob:**.o" > .hg/testhgignore
99 99 $ hg status
100 100 A dir/b.o
101 101 ? .hgignore
102 102 ? a.c
103 103 ? syntax
104 104
105 105 empty out testhgignore
106 106 $ echo > .hg/testhgignore
107 107
108 108 Test relative ignore path (issue4473):
109 109
110 110 $ cat >> $HGRCPATH << EOF
111 111 > [ui]
112 112 > ignore.relative = .hg/testhgignorerel
113 113 > EOF
114 114 $ echo "glob:*.o" > .hg/testhgignorerel
115 115 $ cd dir
116 116 $ hg status
117 117 A dir/b.o
118 118 ? .hgignore
119 119 ? a.c
120 120 ? syntax
121 121
122 122 $ cd ..
123 123 $ echo > .hg/testhgignorerel
124 124 $ echo "syntax: glob" > .hgignore
125 125 $ echo "re:.*\.o" >> .hgignore
126 126 $ hg status
127 127 A dir/b.o
128 128 ? .hgignore
129 129 ? a.c
130 130 ? syntax
131 131
132 132 $ echo "syntax: invalid" > .hgignore
133 133 $ hg status
134 134 $TESTTMP/ignorerepo/.hgignore: ignoring invalid syntax 'invalid' (glob)
135 135 A dir/b.o
136 136 ? .hgignore
137 137 ? a.c
138 138 ? a.o
139 139 ? dir/c.o
140 140 ? syntax
141 141
142 142 $ echo "syntax: glob" > .hgignore
143 143 $ echo "*.o" >> .hgignore
144 144 $ hg status
145 145 A dir/b.o
146 146 ? .hgignore
147 147 ? a.c
148 148 ? syntax
149 149
150 150 $ echo "relglob:syntax*" > .hgignore
151 151 $ hg status
152 152 A dir/b.o
153 153 ? .hgignore
154 154 ? a.c
155 155 ? a.o
156 156 ? dir/c.o
157 157
158 158 $ echo "relglob:*" > .hgignore
159 159 $ hg status
160 160 A dir/b.o
161 161
162 162 $ cd dir
163 163 $ hg status .
164 164 A b.o
165 165
166 166 $ hg debugignore
167 (?:(?:|.*/)[^/]*(?:/|$))
167 <matcher files=[], patterns=None, includes='(?:(?:|.*/)[^/]*(?:/|$))', excludes=None>
168 168
169 169 $ hg debugignore b.o
170 170 b.o is ignored
171 171 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 1: '*') (glob)
172 172
173 173 $ cd ..
174 174
175 175 Check patterns that match only the directory
176 176
177 177 $ echo "^dir\$" > .hgignore
178 178 $ hg status
179 179 A dir/b.o
180 180 ? .hgignore
181 181 ? a.c
182 182 ? a.o
183 183 ? syntax
184 184
185 185 Check a recursive glob pattern can match zero directories (dir/**/c.o matches dir/c.o)
186 186
187 187 $ echo "syntax: glob" > .hgignore
188 188 $ echo "dir/**/c.o" >> .hgignore
189 189 $ touch dir/c.o
190 190 $ mkdir dir/subdir
191 191 $ touch dir/subdir/c.o
192 192 $ hg status
193 193 A dir/b.o
194 194 ? .hgignore
195 195 ? a.c
196 196 ? a.o
197 197 ? syntax
198 198 $ hg debugignore a.c
199 199 a.c is not ignored
200 200 $ hg debugignore dir/c.o
201 201 dir/c.o is ignored
202 202 (ignore rule in $TESTTMP/ignorerepo/.hgignore, line 2: 'dir/**/c.o') (glob)
203 203
204 204 Check using 'include:' in ignore file
205 205
206 206 $ hg purge --all --config extensions.purge=
207 207 $ touch foo.included
208 208
209 209 $ echo ".*.included" > otherignore
210 210 $ hg status -I "include:otherignore"
211 211 ? foo.included
212 212
213 213 $ echo "include:otherignore" >> .hgignore
214 214 $ hg status
215 215 A dir/b.o
216 216 ? .hgignore
217 217 ? otherignore
218 218
219 219 Check recursive uses of 'include:'
220 220
221 221 $ echo "include:nested/ignore" >> otherignore
222 222 $ mkdir nested
223 223 $ echo "glob:*ignore" > nested/ignore
224 224 $ hg status
225 225 A dir/b.o
226 226
227 227 $ cp otherignore goodignore
228 228 $ echo "include:badignore" >> otherignore
229 229 $ hg status
230 230 skipping unreadable pattern file 'badignore': No such file or directory
231 231 A dir/b.o
232 232
233 233 $ mv goodignore otherignore
234 234
235 235 Check using 'include:' while in a non-root directory
236 236
237 237 $ cd ..
238 238 $ hg -R ignorerepo status
239 239 A dir/b.o
240 240 $ cd ignorerepo
241 241
242 242 Check including subincludes
243 243
244 244 $ hg revert -q --all
245 245 $ hg purge --all --config extensions.purge=
246 246 $ echo ".hgignore" > .hgignore
247 247 $ mkdir dir1 dir2
248 248 $ touch dir1/file1 dir1/file2 dir2/file1 dir2/file2
249 249 $ echo "subinclude:dir2/.hgignore" >> .hgignore
250 250 $ echo "glob:file*2" > dir2/.hgignore
251 251 $ hg status
252 252 ? dir1/file1
253 253 ? dir1/file2
254 254 ? dir2/file1
255 255
256 256 Check including subincludes with regexps
257 257
258 258 $ echo "subinclude:dir1/.hgignore" >> .hgignore
259 259 $ echo "regexp:f.le1" > dir1/.hgignore
260 260
261 261 $ hg status
262 262 ? dir1/file2
263 263 ? dir2/file1
264 264
265 265 Check multiple levels of sub-ignores
266 266
267 267 $ mkdir dir1/subdir
268 268 $ touch dir1/subdir/subfile1 dir1/subdir/subfile3 dir1/subdir/subfile4
269 269 $ echo "subinclude:subdir/.hgignore" >> dir1/.hgignore
270 270 $ echo "glob:subfil*3" >> dir1/subdir/.hgignore
271 271
272 272 $ hg status
273 273 ? dir1/file2
274 274 ? dir1/subdir/subfile4
275 275 ? dir2/file1
276 276
277 277 Check including a subignore at the same level
278 278
279 279 $ mv dir1/subdir/.hgignore dir1/.hgignoretwo
280 280 $ echo "regexp:f.le1" > dir1/.hgignore
281 281 $ echo "subinclude:.hgignoretwo" >> dir1/.hgignore
282 282 $ echo "glob:file*2" > dir1/.hgignoretwo
283 283
284 284 $ hg status | grep file2
285 285 [1]
286 286 $ hg debugignore dir1/file2
287 287 dir1/file2 is ignored
288 288 (ignore rule in dir2/.hgignore, line 1: 'file*2')
289 289
290 290 #if windows
291 291
292 292 Windows paths are accepted on input
293 293
294 294 $ rm dir1/.hgignore
295 295 $ echo "dir1/file*" >> .hgignore
296 296 $ hg debugignore "dir1\file2"
297 297 dir1\file2 is ignored
298 298 (ignore rule in $TESTTMP\ignorerepo\.hgignore, line 4: 'dir1/file*')
299 299 $ hg up -qC .
300 300
301 301 #endif