pathauditor: disable cache of audited paths by default (issue5628)...
Yuya Nishihara
r33722:377e8dda stable
@@ -1,712 +1,712 @@
1 1 # __init__.py - fsmonitor initialization and overrides
2 2 #
3 3 # Copyright 2013-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9 9
10 10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 11 status results.
12 12
13 13 On a particular Linux system, for a real-world repository with over 400,000
14 14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 15 system, with fsmonitor it takes about 0.3 seconds.
16 16
17 17 fsmonitor requires no configuration -- it will tell Watchman about your
18 18 repository as necessary. You'll need to install Watchman from
19 19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20 20
21 21 The following configuration options exist:
22 22
23 23 ::
24 24
25 25 [fsmonitor]
26 26 mode = {off, on, paranoid}
27 27
28 28 When `mode = off`, fsmonitor will disable itself (similar to not loading the
29 29 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
30 30 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
31 31 and ensure that the results are consistent.
32 32
33 33 ::
34 34
35 35 [fsmonitor]
36 36 timeout = (float)
37 37
38 38 A value, in seconds, that determines how long fsmonitor will wait for Watchman
39 39 to return results. Defaults to `2.0`.
40 40
41 41 ::
42 42
43 43 [fsmonitor]
44 44 blacklistusers = (list of userids)
45 45
46 46 A list of usernames for which fsmonitor will disable itself altogether.
47 47
48 48 ::
49 49
50 50 [fsmonitor]
51 51 walk_on_invalidate = (boolean)
52 52
53 53 Whether or not to walk the whole repo ourselves when our cached state has been
54 54 invalidated, for example when Watchman has been restarted or .hgignore rules
55 55 have been changed. Walking the repo in that case can result in competing for
56 56 I/O with Watchman. For large repos it is recommended to set this value to
57 57 false. You may wish to set this to true if you have a very fast filesystem
58 58 that can outpace the IPC overhead of getting the result data for the full repo
59 59 from Watchman. Defaults to false.
60 60
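Putting these options together, a hypothetical configuration might look like
this (the values are illustrative only, not recommendations)::

    [fsmonitor]
    mode = on
    timeout = 2.0
    walk_on_invalidate = false
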
61 61 fsmonitor is incompatible with the largefiles and eol extensions, and
62 62 will disable itself if any of those are active.
63 63
64 64 '''
65 65
66 66 # Platforms Supported
67 67 # ===================
68 68 #
69 69 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
70 70 # even under severe loads.
71 71 #
72 72 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
73 73 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
74 74 # user testing under normal loads.
75 75 #
76 76 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
77 77 # very little testing has been done.
78 78 #
79 79 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
80 80 #
81 81 # Known Issues
82 82 # ============
83 83 #
84 84 # * fsmonitor will disable itself if any of the following extensions are
85 85 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
86 86 # * fsmonitor will produce incorrect results if nested repos that are not
87 87 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
88 88 #
89 89 # The issues related to nested repos and subrepos are probably not fundamental
90 90 # ones. Patches to fix them are welcome.
91 91
92 92 from __future__ import absolute_import
93 93
94 94 import codecs
95 95 import hashlib
96 96 import os
97 97 import stat
98 98 import sys
99 99
100 100 from mercurial.i18n import _
101 101 from mercurial import (
102 102 context,
103 103 encoding,
104 104 error,
105 105 extensions,
106 106 localrepo,
107 107 merge,
108 108 pathutil,
109 109 pycompat,
110 110 scmutil,
111 111 util,
112 112 )
113 113 from mercurial import match as matchmod
114 114
115 115 from . import (
116 116 pywatchman,
117 117 state,
118 118 watchmanclient,
119 119 )
120 120
121 121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 123 # be specifying the version(s) of Mercurial they are tested with, or
124 124 # leave the attribute unspecified.
125 125 testedwith = 'ships-with-hg-core'
126 126
127 127 # This extension is incompatible with the following blacklisted extensions
128 128 # and will disable itself when encountering one of these:
129 129 _blacklist = ['largefiles', 'eol']
130 130
131 131 def _handleunavailable(ui, state, ex):
132 132 """Exception handler for Watchman interaction exceptions"""
133 133 if isinstance(ex, watchmanclient.Unavailable):
134 134 if ex.warn:
135 135 ui.warn(str(ex) + '\n')
136 136 if ex.invalidate:
137 137 state.invalidate()
138 138 ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
139 139 else:
140 140 ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
141 141
142 142 def _hashignore(ignore):
143 143 """Calculate hash for ignore patterns and filenames
144 144
145 145 If this information changes between Mercurial invocations, we can't
146 146 rely on Watchman information anymore and have to re-scan the working
147 147 copy.
148 148
149 149 """
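# Note: this hashes repr() of the ignore matcher object rather than the
# raw .hgignore file contents, so any change that alters the compiled
# patterns changes the hash and invalidates the saved Watchman state.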
150 150 sha1 = hashlib.sha1()
151 151 sha1.update(repr(ignore))
152 152 return sha1.hexdigest()
153 153
154 154 _watchmanencoding = pywatchman.encoding.get_local_encoding()
155 155 _fsencoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
156 156 _fixencoding = codecs.lookup(_watchmanencoding) != codecs.lookup(_fsencoding)
157 157
158 158 def _watchmantofsencoding(path):
159 159 """Fix path to match watchman and local filesystem encoding
160 160
161 161 watchman's path encoding can differ from the filesystem encoding. For example,
162 162 on Windows it is always UTF-8.
163 163 """
164 164 try:
165 165 decoded = path.decode(_watchmanencoding)
166 166 except UnicodeDecodeError as e:
167 167 raise error.Abort(str(e), hint='watchman encoding error')
168 168
169 169 try:
170 170 encoded = decoded.encode(_fsencoding, 'strict')
171 171 except UnicodeEncodeError as e:
172 172 raise error.Abort(str(e))
173 173
174 174 return encoded
175 175
176 176 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
177 177 '''Replacement for dirstate.walk, hooking into Watchman.
178 178
179 179 Whenever full is False, ignored is False, and the Watchman client is
180 180 available, use Watchman combined with saved state to possibly return only a
181 181 subset of files.'''
182 182 def bail():
183 183 return orig(match, subrepos, unknown, ignored, full=True)
184 184
185 185 if full or ignored or not self._watchmanclient.available():
186 186 return bail()
187 187 state = self._fsmonitorstate
188 188 clock, ignorehash, notefiles = state.get()
189 189 if not clock:
190 190 if state.walk_on_invalidate:
191 191 return bail()
192 192 # Initial NULL clock value, see
193 193 # https://facebook.github.io/watchman/docs/clockspec.html
194 194 clock = 'c:0:0'
195 195 notefiles = []
196 196
197 197 def fwarn(f, msg):
198 198 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
199 199 return False
200 200
201 201 def badtype(mode):
202 202 kind = _('unknown')
203 203 if stat.S_ISCHR(mode):
204 204 kind = _('character device')
205 205 elif stat.S_ISBLK(mode):
206 206 kind = _('block device')
207 207 elif stat.S_ISFIFO(mode):
208 208 kind = _('fifo')
209 209 elif stat.S_ISSOCK(mode):
210 210 kind = _('socket')
211 211 elif stat.S_ISDIR(mode):
212 212 kind = _('directory')
213 213 return _('unsupported file type (type is %s)') % kind
214 214
215 215 ignore = self._ignore
216 216 dirignore = self._dirignore
217 217 if unknown:
218 218 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
219 219 # ignore list changed -- can't rely on Watchman state any more
220 220 if state.walk_on_invalidate:
221 221 return bail()
222 222 notefiles = []
223 223 clock = 'c:0:0'
224 224 else:
225 225 # always ignore
226 226 ignore = util.always
227 227 dirignore = util.always
228 228
229 229 matchfn = match.matchfn
230 230 matchalways = match.always()
231 231 dmap = self._map
232 232 nonnormalset = getattr(self, '_nonnormalset', None)
233 233
234 234 copymap = self._copymap
235 235 getkind = stat.S_IFMT
236 236 dirkind = stat.S_IFDIR
237 237 regkind = stat.S_IFREG
238 238 lnkkind = stat.S_IFLNK
239 239 join = self._join
240 240 normcase = util.normcase
241 241 fresh_instance = False
242 242
243 243 exact = skipstep3 = False
244 244 if match.isexact(): # match.exact
245 245 exact = True
246 246 dirignore = util.always # skip step 2
247 247 elif match.prefix(): # match.match, no patterns
248 248 skipstep3 = True
249 249
250 250 if not exact and self._checkcase:
251 251 # note that even though we could receive directory entries, we're only
252 252 # interested in checking if a file with the same name exists. So only
253 253 # normalize files if possible.
254 254 normalize = self._normalizefile
255 255 skipstep3 = False
256 256 else:
257 257 normalize = None
258 258
259 259 # step 1: find all explicit files
260 260 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
261 261
262 262 skipstep3 = skipstep3 and not (work or dirsnotfound)
263 263 work = [d for d in work if not dirignore(d[0])]
264 264
265 265 if not work and (exact or skipstep3):
266 266 for s in subrepos:
267 267 del results[s]
268 268 del results['.hg']
269 269 return results
270 270
271 271 # step 2: query Watchman
272 272 try:
273 273 # Use the user-configured timeout for the query.
274 274 # Add a little slack over the top of the user query to allow for
275 275 # overheads while transferring the data
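# The query issued below asks Watchman for every file changed since
# `clock`, excluding the repository metadata: ['dirname', '.hg'] matches
# anything under the .hg directory, ['name', '.hg', 'wholename'] matches
# the .hg entry itself, and the surrounding 'not'/'anyof' excludes both.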
276 276 self._watchmanclient.settimeout(state.timeout + 0.1)
277 277 result = self._watchmanclient.command('query', {
278 278 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
279 279 'since': clock,
280 280 'expression': [
281 281 'not', [
282 282 'anyof', ['dirname', '.hg'],
283 283 ['name', '.hg', 'wholename']
284 284 ]
285 285 ],
286 286 'sync_timeout': int(state.timeout * 1000),
287 287 'empty_on_fresh_instance': state.walk_on_invalidate,
288 288 })
289 289 except Exception as ex:
290 290 _handleunavailable(self._ui, state, ex)
291 291 self._watchmanclient.clearconnection()
292 292 return bail()
293 293 else:
294 294 # We need to propagate the last observed clock up so that we
295 295 # can use it for our next query
296 296 state.setlastclock(result['clock'])
297 297 if result['is_fresh_instance']:
298 298 if state.walk_on_invalidate:
299 299 state.invalidate()
300 300 return bail()
301 301 fresh_instance = True
302 302 # Ignore any prior notable files from the state info
303 303 notefiles = []
304 304
305 305 # for file paths which require normalization, where we may encounter a
306 306 # case collision, we store our own foldmap
307 307 if normalize:
308 308 foldmap = dict((normcase(k), k) for k in results)
309 309
310 310 switch_slashes = pycompat.ossep == '\\'
311 311 # The order of the results is, strictly speaking, undefined.
312 312 # For case changes on a case insensitive filesystem we may receive
313 313 # two entries, one with exists=True and another with exists=False.
314 314 # The exists=True entries in the same response should be interpreted
315 315 # as being happens-after the exists=False entries due to the way that
316 316 # Watchman tracks files. We use this property to reconcile deletes
317 317 # for name case changes.
318 318 for entry in result['files']:
319 319 fname = entry['name']
320 320 if _fixencoding:
321 321 fname = _watchmantofsencoding(fname)
322 322 if switch_slashes:
323 323 fname = fname.replace('\\', '/')
324 324 if normalize:
325 325 normed = normcase(fname)
326 326 fname = normalize(fname, True, True)
327 327 foldmap[normed] = fname
328 328 fmode = entry['mode']
329 329 fexists = entry['exists']
330 330 kind = getkind(fmode)
331 331
332 332 if not fexists:
333 333 # if marked as deleted and we don't already have a change
334 334 # record, mark it as deleted. If we already have an entry
335 335 # for fname then it was either part of walkexplicit or was
336 336 # an earlier result that was a case change
337 337 if fname not in results and fname in dmap and (
338 338 matchalways or matchfn(fname)):
339 339 results[fname] = None
340 340 elif kind == dirkind:
341 341 if fname in dmap and (matchalways or matchfn(fname)):
342 342 results[fname] = None
343 343 elif kind == regkind or kind == lnkkind:
344 344 if fname in dmap:
345 345 if matchalways or matchfn(fname):
346 346 results[fname] = entry
347 347 elif (matchalways or matchfn(fname)) and not ignore(fname):
348 348 results[fname] = entry
349 349 elif fname in dmap and (matchalways or matchfn(fname)):
350 350 results[fname] = None
351 351
352 352 # step 3: query notable files we don't already know about
353 353 # XXX try not to iterate over the entire dmap
354 354 if normalize:
355 355 # any notable files that have changed case will already be handled
356 356 # above, so just check membership in the foldmap
357 357 notefiles = set((normalize(f, True, True) for f in notefiles
358 358 if normcase(f) not in foldmap))
359 359 visit = set((f for f in notefiles if (f not in results and matchfn(f)
360 360 and (f in dmap or not ignore(f)))))
361 361
362 362 if nonnormalset is not None and not fresh_instance:
363 363 if matchalways:
364 364 visit.update(f for f in nonnormalset if f not in results)
365 365 visit.update(f for f in copymap if f not in results)
366 366 else:
367 367 visit.update(f for f in nonnormalset
368 368 if f not in results and matchfn(f))
369 369 visit.update(f for f in copymap
370 370 if f not in results and matchfn(f))
371 371 else:
372 372 if matchalways:
373 373 visit.update(f for f, st in dmap.iteritems()
374 374 if (f not in results and
375 375 (st[2] < 0 or st[0] != 'n' or fresh_instance)))
376 376 visit.update(f for f in copymap if f not in results)
377 377 else:
378 378 visit.update(f for f, st in dmap.iteritems()
379 379 if (f not in results and
380 380 (st[2] < 0 or st[0] != 'n' or fresh_instance)
381 381 and matchfn(f)))
382 382 visit.update(f for f in copymap
383 383 if f not in results and matchfn(f))
384 384
385 audit = pathutil.pathauditor(self._root).check
385 audit = pathutil.pathauditor(self._root, cached=True).check
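# (Context for this hunk: per the commit message, pathauditor's cache of
# audited paths is now disabled by default, so this call-site opts back
# in explicitly with cached=True.)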
386 386 auditpass = [f for f in visit if audit(f)]
387 387 auditpass.sort()
388 388 auditfail = visit.difference(auditpass)
389 389 for f in auditfail:
390 390 results[f] = None
391 391
392 392 nf = iter(auditpass).next
393 393 for st in util.statfiles([join(f) for f in auditpass]):
394 394 f = nf()
395 395 if st or f in dmap:
396 396 results[f] = st
397 397
398 398 for s in subrepos:
399 399 del results[s]
400 400 del results['.hg']
401 401 return results
402 402
403 403 def overridestatus(
404 404 orig, self, node1='.', node2=None, match=None, ignored=False,
405 405 clean=False, unknown=False, listsubrepos=False):
406 406 listignored = ignored
407 407 listclean = clean
408 408 listunknown = unknown
409 409
410 410 def _cmpsets(l1, l2):
411 411 try:
412 412 if 'FSMONITOR_LOG_FILE' in encoding.environ:
413 413 fn = encoding.environ['FSMONITOR_LOG_FILE']
414 414 f = open(fn, 'wb')
415 415 else:
416 416 fn = 'fsmonitorfail.log'
417 417 f = self.opener(fn, 'wb')
418 418 except (IOError, OSError):
419 419 self.ui.warn(_('warning: unable to write to %s\n') % fn)
420 420 return
421 421
422 422 try:
423 423 for i, (s1, s2) in enumerate(zip(l1, l2)):
424 424 if set(s1) != set(s2):
425 425 f.write('sets at position %d are unequal\n' % i)
426 426 f.write('watchman returned: %s\n' % s1)
427 427 f.write('stat returned: %s\n' % s2)
428 428 finally:
429 429 f.close()
430 430
431 431 if isinstance(node1, context.changectx):
432 432 ctx1 = node1
433 433 else:
434 434 ctx1 = self[node1]
435 435 if isinstance(node2, context.changectx):
436 436 ctx2 = node2
437 437 else:
438 438 ctx2 = self[node2]
439 439
440 440 working = ctx2.rev() is None
441 441 parentworking = working and ctx1 == self['.']
442 442 match = match or matchmod.always(self.root, self.getcwd())
443 443
444 444 # Maybe we can use this opportunity to update Watchman's state.
445 445 # Mercurial uses workingcommitctx and/or memctx to represent the part of
446 446 # the workingctx that is to be committed. So don't update the state in
447 447 # that case.
448 448 # HG_PENDING is set in the environment when the dirstate is being updated
449 449 # in the middle of a transaction; we must not update our state in that
450 450 # case, or we risk forgetting about changes in the working copy.
451 451 updatestate = (parentworking and match.always() and
452 452 not isinstance(ctx2, (context.workingcommitctx,
453 453 context.memctx)) and
454 454 'HG_PENDING' not in encoding.environ)
455 455
456 456 try:
457 457 if self._fsmonitorstate.walk_on_invalidate:
458 458 # Use a short timeout to query the current clock. If that
459 459 # takes too long then we assume that the service will be slow
460 460 # to answer our query.
461 461 # walk_on_invalidate indicates that we prefer to walk the
462 462 # tree ourselves because we can ignore portions that Watchman
463 463 # cannot and we tend to be faster in the warmer buffer cache
464 464 # cases.
465 465 self._watchmanclient.settimeout(0.1)
466 466 else:
467 467 # Give Watchman more time to potentially complete its walk
468 468 # and return the initial clock. In this mode we assume that
469 469 # the filesystem will be slower than parsing a potentially
470 470 # very large Watchman result set.
471 471 self._watchmanclient.settimeout(
472 472 self._fsmonitorstate.timeout + 0.1)
473 473 startclock = self._watchmanclient.getcurrentclock()
474 474 except Exception as ex:
475 475 self._watchmanclient.clearconnection()
476 476 _handleunavailable(self.ui, self._fsmonitorstate, ex)
477 477 # boo, Watchman failed. bail
478 478 return orig(node1, node2, match, listignored, listclean,
479 479 listunknown, listsubrepos)
480 480
481 481 if updatestate:
482 482 # We need info about unknown files. This may make things slower the
483 483 # first time, but whatever.
484 484 stateunknown = True
485 485 else:
486 486 stateunknown = listunknown
487 487
488 488 if updatestate:
489 489 ps = poststatus(startclock)
490 490 self.addpostdsstatus(ps)
491 491
492 492 r = orig(node1, node2, match, listignored, listclean, stateunknown,
493 493 listsubrepos)
494 494 modified, added, removed, deleted, unknown, ignored, clean = r
495 495
496 496 if not listunknown:
497 497 unknown = []
498 498
499 499 # don't do paranoid checks if we're not going to query Watchman anyway
500 500 full = listclean or match.traversedir is not None
501 501 if self._fsmonitorstate.mode == 'paranoid' and not full:
502 502 # run status again and fall back to the old walk this time
503 503 self.dirstate._fsmonitordisable = True
504 504
505 505 # shut the UI up
506 506 quiet = self.ui.quiet
507 507 self.ui.quiet = True
508 508 fout, ferr = self.ui.fout, self.ui.ferr
509 509 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
510 510
511 511 try:
512 512 rv2 = orig(
513 513 node1, node2, match, listignored, listclean, listunknown,
514 514 listsubrepos)
515 515 finally:
516 516 self.dirstate._fsmonitordisable = False
517 517 self.ui.quiet = quiet
518 518 self.ui.fout, self.ui.ferr = fout, ferr
519 519
520 520 # clean isn't tested since it's set to True above
521 521 _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
522 522 rv2)
523 523 modified, added, removed, deleted, unknown, ignored, clean = rv2
524 524
525 525 return scmutil.status(
526 526 modified, added, removed, deleted, unknown, ignored, clean)
527 527
528 528 class poststatus(object):
529 529 def __init__(self, startclock):
530 530 self._startclock = startclock
531 531
532 532 def __call__(self, wctx, status):
533 533 clock = wctx.repo()._fsmonitorstate.getlastclock() or self._startclock
534 534 hashignore = _hashignore(wctx.repo().dirstate._ignore)
535 535 notefiles = (status.modified + status.added + status.removed +
536 536 status.deleted + status.unknown)
537 537 wctx.repo()._fsmonitorstate.set(clock, hashignore, notefiles)
538 538
539 539 def makedirstate(repo, dirstate):
540 540 class fsmonitordirstate(dirstate.__class__):
541 541 def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
542 542 # _fsmonitordisable is used in paranoid mode
543 543 self._fsmonitordisable = False
544 544 self._fsmonitorstate = fsmonitorstate
545 545 self._watchmanclient = watchmanclient
546 546
547 547 def walk(self, *args, **kwargs):
548 548 orig = super(fsmonitordirstate, self).walk
549 549 if self._fsmonitordisable:
550 550 return orig(*args, **kwargs)
551 551 return overridewalk(orig, self, *args, **kwargs)
552 552
553 553 def rebuild(self, *args, **kwargs):
554 554 self._fsmonitorstate.invalidate()
555 555 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
556 556
557 557 def invalidate(self, *args, **kwargs):
558 558 self._fsmonitorstate.invalidate()
559 559 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
560 560
561 561 dirstate.__class__ = fsmonitordirstate
562 562 dirstate._fsmonitorinit(repo._fsmonitorstate, repo._watchmanclient)
563 563
564 564 def wrapdirstate(orig, self):
565 565 ds = orig(self)
566 566 # only override the dirstate when Watchman is available for the repo
567 567 if util.safehasattr(self, '_fsmonitorstate'):
568 568 makedirstate(self, ds)
569 569 return ds
570 570
571 571 def extsetup(ui):
572 572 extensions.wrapfilecache(
573 573 localrepo.localrepository, 'dirstate', wrapdirstate)
574 574 if pycompat.sysplatform == 'darwin':
575 575 # An assist for avoiding the dangling-symlink fsevents bug
576 576 extensions.wrapfunction(os, 'symlink', wrapsymlink)
577 577
578 578 extensions.wrapfunction(merge, 'update', wrapupdate)
579 579
580 580 def wrapsymlink(orig, source, link_name):
581 581 ''' if we create a dangling symlink, also touch the parent dir
582 582 to encourage fsevents notifications to work more correctly '''
583 583 try:
584 584 return orig(source, link_name)
585 585 finally:
586 586 try:
587 587 os.utime(os.path.dirname(link_name), None)
588 588 except OSError:
589 589 pass
590 590
591 591 class state_update(object):
592 592 ''' This context manager is responsible for dispatching the state-enter
593 593 and state-leave signals to the watchman service '''
594 594
595 595 def __init__(self, repo, node, distance, partial):
596 596 self.repo = repo
597 597 self.node = node
598 598 self.distance = distance
599 599 self.partial = partial
600 600 self._lock = None
601 601 self.need_leave = False
602 602
603 603 def __enter__(self):
604 604 # We explicitly need to take a lock here, before we proceed to update
605 605 # watchman about the update operation, so that we don't race with
606 606 # some other actor. merge.update is going to take the wlock almost
607 607 # immediately anyway, so this is effectively extending the lock
608 608 # around a couple of short sanity checks.
609 609 self._lock = self.repo.wlock()
610 610 self.need_leave = self._state('state-enter')
611 611 return self
612 612
613 613 def __exit__(self, type_, value, tb):
614 614 try:
615 615 if self.need_leave:
616 616 status = 'ok' if type_ is None else 'failed'
617 617 self._state('state-leave', status=status)
618 618 finally:
619 619 if self._lock:
620 620 self._lock.release()
621 621
622 622 def _state(self, cmd, status='ok'):
623 623 if not util.safehasattr(self.repo, '_watchmanclient'):
624 624 return False
625 625 try:
626 626 commithash = self.repo[self.node].hex()
627 627 self.repo._watchmanclient.command(cmd, {
628 628 'name': 'hg.update',
629 629 'metadata': {
630 630 # the target revision
631 631 'rev': commithash,
632 632 # approximate number of commits between current and target
633 633 'distance': self.distance,
634 634 # success/failure (only really meaningful for state-leave)
635 635 'status': status,
636 636 # whether the working copy parent is changing
637 637 'partial': self.partial,
638 638 }})
639 639 return True
640 640 except Exception as e:
641 641 # Swallow any errors; fire and forget
642 642 self.repo.ui.log(
643 643 'watchman', 'Exception %s while running %s\n', e, cmd)
644 644 return False
645 645
646 646 # Bracket working copy updates with calls to the watchman state-enter
647 647 # and state-leave commands. This allows clients to perform more intelligent
648 648 # settling during bulk file change scenarios
649 649 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
650 650 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
651 651 mergeancestor=False, labels=None, matcher=None, **kwargs):
652 652
653 653 distance = 0
654 654 partial = True
655 655 if matcher is None or matcher.always():
656 656 partial = False
657 657 wc = repo[None]
658 658 parents = wc.parents()
659 659 if len(parents) == 2:
660 660 anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
661 661 ancrev = repo[anc].rev()
662 662 distance = abs(repo[node].rev() - ancrev)
663 663 elif len(parents) == 1:
664 664 distance = abs(repo[node].rev() - parents[0].rev())
665 665
666 666 with state_update(repo, node, distance, partial):
667 667 return orig(
668 668 repo, node, branchmerge, force, ancestor, mergeancestor,
669 669 labels, matcher, **kwargs)
670 670
671 671 def reposetup(ui, repo):
672 672 # We don't work with the blacklisted extensions (largefiles, eol)
673 673 exts = extensions.enabled()
674 674 for ext in _blacklist:
675 675 if ext in exts:
676 676 ui.warn(_('The fsmonitor extension is incompatible with the %s '
677 677 'extension and has been disabled.\n') % ext)
678 678 return
679 679
680 680 if repo.local():
681 681 # We don't work with subrepos either.
682 682 #
683 683 # checking repo[None].substate can cause a dirstate parse, which is
684 684 # too slow. Instead, look for the .hgsubstate and .hgsub files directly,
685 685 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
686 686 return
687 687
688 688 fsmonitorstate = state.state(repo)
689 689 if fsmonitorstate.mode == 'off':
690 690 return
691 691
692 692 try:
693 693 client = watchmanclient.client(repo)
694 694 except Exception as ex:
695 695 _handleunavailable(ui, fsmonitorstate, ex)
696 696 return
697 697
698 698 repo._fsmonitorstate = fsmonitorstate
699 699 repo._watchmanclient = client
700 700
701 701 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
702 702 if cached:
703 703 # at this point since fsmonitorstate wasn't present,
704 704 # repo.dirstate is not a fsmonitordirstate
705 705 makedirstate(repo, dirstate)
706 706
707 707 class fsmonitorrepo(repo.__class__):
708 708 def status(self, *args, **kwargs):
709 709 orig = super(fsmonitorrepo, self).status
710 710 return overridestatus(orig, self, *args, **kwargs)
711 711
712 712 repo.__class__ = fsmonitorrepo
@@ -1,3761 +1,3761 @@
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import itertools
12 12 import os
13 13 import re
14 14 import tempfile
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 hex,
19 19 nullid,
20 20 nullrev,
21 21 short,
22 22 )
23 23
24 24 from . import (
25 25 bookmarks,
26 26 changelog,
27 27 copies,
28 28 crecord as crecordmod,
29 29 dirstateguard,
30 30 encoding,
31 31 error,
32 32 formatter,
33 33 graphmod,
34 34 match as matchmod,
35 35 obsolete,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 registrar,
41 41 revlog,
42 42 revset,
43 43 scmutil,
44 44 smartset,
45 45 templatekw,
46 46 templater,
47 47 util,
48 48 vfs as vfsmod,
49 49 )
50 50 stringio = util.stringio
51 51
52 52 # templates of common command options
53 53
54 54 dryrunopts = [
55 55 ('n', 'dry-run', None,
56 56 _('do not perform actions, just print output')),
57 57 ]
58 58
59 59 remoteopts = [
60 60 ('e', 'ssh', '',
61 61 _('specify ssh command to use'), _('CMD')),
62 62 ('', 'remotecmd', '',
63 63 _('specify hg command to run on the remote side'), _('CMD')),
64 64 ('', 'insecure', None,
65 65 _('do not verify server certificate (ignoring web.cacerts config)')),
66 66 ]
67 67
68 68 walkopts = [
69 69 ('I', 'include', [],
70 70 _('include names matching the given patterns'), _('PATTERN')),
71 71 ('X', 'exclude', [],
72 72 _('exclude names matching the given patterns'), _('PATTERN')),
73 73 ]
74 74
75 75 commitopts = [
76 76 ('m', 'message', '',
77 77 _('use text as commit message'), _('TEXT')),
78 78 ('l', 'logfile', '',
79 79 _('read commit message from file'), _('FILE')),
80 80 ]
81 81
82 82 commitopts2 = [
83 83 ('d', 'date', '',
84 84 _('record the specified date as commit date'), _('DATE')),
85 85 ('u', 'user', '',
86 86 _('record the specified user as committer'), _('USER')),
87 87 ]
88 88
89 89 # hidden for now
90 90 formatteropts = [
91 91 ('T', 'template', '',
92 92 _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
93 93 ]
94 94
95 95 templateopts = [
96 96 ('', 'style', '',
97 97 _('display using template map file (DEPRECATED)'), _('STYLE')),
98 98 ('T', 'template', '',
99 99 _('display with template'), _('TEMPLATE')),
100 100 ]
101 101
102 102 logopts = [
103 103 ('p', 'patch', None, _('show patch')),
104 104 ('g', 'git', None, _('use git extended diff format')),
105 105 ('l', 'limit', '',
106 106 _('limit number of changes displayed'), _('NUM')),
107 107 ('M', 'no-merges', None, _('do not show merges')),
108 108 ('', 'stat', None, _('output diffstat-style summary of changes')),
109 109 ('G', 'graph', None, _("show the revision DAG")),
110 110 ] + templateopts
111 111
112 112 diffopts = [
113 113 ('a', 'text', None, _('treat all files as text')),
114 114 ('g', 'git', None, _('use git extended diff format')),
115 115 ('', 'binary', None, _('generate binary diffs in git mode (default)')),
116 116 ('', 'nodates', None, _('omit dates from diff headers'))
117 117 ]
118 118
119 119 diffwsopts = [
120 120 ('w', 'ignore-all-space', None,
121 121 _('ignore white space when comparing lines')),
122 122 ('b', 'ignore-space-change', None,
123 123 _('ignore changes in the amount of white space')),
124 124 ('B', 'ignore-blank-lines', None,
125 125 _('ignore changes whose lines are all blank')),
126 126 ]
127 127
128 128 diffopts2 = [
129 129 ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
130 130 ('p', 'show-function', None, _('show which function each change is in')),
131 131 ('', 'reverse', None, _('produce a diff that undoes the changes')),
132 132 ] + diffwsopts + [
133 133 ('U', 'unified', '',
134 134 _('number of lines of context to show'), _('NUM')),
135 135 ('', 'stat', None, _('output diffstat-style summary of changes')),
136 136 ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
137 137 ]
138 138
139 139 mergetoolopts = [
140 140 ('t', 'tool', '', _('specify merge tool')),
141 141 ]
142 142
143 143 similarityopts = [
144 144 ('s', 'similarity', '',
145 145 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
146 146 ]
147 147
148 148 subrepoopts = [
149 149 ('S', 'subrepos', None,
150 150 _('recurse into subrepositories'))
151 151 ]
152 152
153 153 debugrevlogopts = [
154 154 ('c', 'changelog', False, _('open changelog')),
155 155 ('m', 'manifest', False, _('open manifest')),
156 156 ('', 'dir', '', _('open directory manifest')),
157 157 ]
158 158
159 159 # special string such that everything below this line will be ignored in the
160 160 # editor text
161 161 _linebelow = "^HG: ------------------------ >8 ------------------------$"
162 162
163 163 def ishunk(x):
164 164 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
165 165 return isinstance(x, hunkclasses)
166 166
167 167 def newandmodified(chunks, originalchunks):
168 168 newlyaddedandmodifiedfiles = set()
169 169 for chunk in chunks:
170 170 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
171 171 originalchunks:
172 172 newlyaddedandmodifiedfiles.add(chunk.header.filename())
173 173 return newlyaddedandmodifiedfiles
174 174
175 175 def parsealiases(cmd):
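# For example, parsealiases("^log|history") yields ['log', 'history']:
# the leading '^' (which marks a frequently-used command in the command
# table) is stripped, then the remainder is split on '|'.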
176 176 return cmd.lstrip("^").split("|")
177 177
178 178 def setupwrapcolorwrite(ui):
179 179 # wrap ui.write so diff output can be labeled/colorized
180 180 def wrapwrite(orig, *args, **kw):
181 181 label = kw.pop('label', '')
182 182 for chunk, l in patch.difflabel(lambda: args):
183 183 orig(chunk, label=label + l)
184 184
185 185 oldwrite = ui.write
186 186 def wrap(*args, **kwargs):
187 187 return wrapwrite(oldwrite, *args, **kwargs)
188 188 setattr(ui, 'write', wrap)
189 189 return oldwrite
190 190
191 191 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
192 192 if usecurses:
193 193 if testfile:
194 194 recordfn = crecordmod.testdecorator(testfile,
195 195 crecordmod.testchunkselector)
196 196 else:
197 197 recordfn = crecordmod.chunkselector
198 198
199 199 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
200 200
201 201 else:
202 202 return patch.filterpatch(ui, originalhunks, operation)
203 203
204 204 def recordfilter(ui, originalhunks, operation=None):
205 205 """ Prompts the user to filter the originalhunks and return a list of
206 206 selected hunks.
207 207 *operation* is used to build ui messages indicating to the user what
208 208 kind of filtering they are doing: reverting, committing, shelving, etc.
209 209 (see patch.filterpatch).
210 210 """
211 211 usecurses = crecordmod.checkcurses(ui)
212 212 testfile = ui.config('experimental', 'crecordtest')
213 213 oldwrite = setupwrapcolorwrite(ui)
214 214 try:
215 215 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
216 216 testfile, operation)
217 217 finally:
218 218 ui.write = oldwrite
219 219 return newchunks, newopts
220 220
221 221 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
222 222 filterfn, *pats, **opts):
223 223 from . import merge as mergemod
224 224 opts = pycompat.byteskwargs(opts)
225 225 if not ui.interactive():
226 226 if cmdsuggest:
227 227 msg = _('running non-interactively, use %s instead') % cmdsuggest
228 228 else:
229 229 msg = _('running non-interactively')
230 230 raise error.Abort(msg)
231 231
232 232 # make sure username is set before going interactive
233 233 if not opts.get('user'):
234 234 ui.username() # raise exception, username not provided
235 235
236 236 def recordfunc(ui, repo, message, match, opts):
237 237 """This is generic record driver.
238 238
239 239 Its job is to interactively filter local changes, and
240 240 accordingly prepare the working directory into a state in which the
241 241 job can be delegated to a non-interactive commit command such as
242 242 'commit' or 'qrefresh'.
243 243
244 244 After the actual job is done by the non-interactive command, the
245 245 working directory is restored to its original state.
246 246
247 247 In the end we'll record interesting changes, and everything else
248 248 will be left in place, so the user can continue working.
249 249 """
250 250
251 251 checkunfinished(repo, commit=True)
252 252 wctx = repo[None]
253 253 merge = len(wctx.parents()) > 1
254 254 if merge:
255 255 raise error.Abort(_('cannot partially commit a merge '
256 256 '(use "hg commit" instead)'))
257 257
258 258 def fail(f, msg):
259 259 raise error.Abort('%s: %s' % (f, msg))
260 260
261 261 force = opts.get('force')
262 262 if not force:
263 263 vdirs = []
264 264 match.explicitdir = vdirs.append
265 265 match.bad = fail
266 266
267 267 status = repo.status(match=match)
268 268 if not force:
269 269 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
270 270 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
271 271 diffopts.nodates = True
272 272 diffopts.git = True
273 273 diffopts.showfunc = True
274 274 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
275 275 originalchunks = patch.parsepatch(originaldiff)
276 276
277 277 # 1. filter patch, since we are intending to apply subset of it
278 278 try:
279 279 chunks, newopts = filterfn(ui, originalchunks)
280 280 except patch.PatchError as err:
281 281 raise error.Abort(_('error parsing patch: %s') % err)
282 282 opts.update(newopts)
283 283
284 284 # We need to keep a backup of files that have been newly added and
285 285 # modified during the recording process because there is a previous
286 286 # version without the edit in the workdir
287 287 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
288 288 contenders = set()
289 289 for h in chunks:
290 290 try:
291 291 contenders.update(set(h.files()))
292 292 except AttributeError:
293 293 pass
294 294
295 295 changed = status.modified + status.added + status.removed
296 296 newfiles = [f for f in changed if f in contenders]
297 297 if not newfiles:
298 298 ui.status(_('no changes to record\n'))
299 299 return 0
300 300
301 301 modified = set(status.modified)
302 302
303 303 # 2. backup changed files, so we can restore them in the end
304 304
305 305 if backupall:
306 306 tobackup = changed
307 307 else:
308 308 tobackup = [f for f in newfiles if f in modified or f in \
309 309 newlyaddedandmodifiedfiles]
310 310 backups = {}
311 311 if tobackup:
312 312 backupdir = repo.vfs.join('record-backups')
313 313 try:
314 314 os.mkdir(backupdir)
315 315 except OSError as err:
316 316 if err.errno != errno.EEXIST:
317 317 raise
318 318 try:
319 319 # backup continues
320 320 for f in tobackup:
321 321 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
322 322 dir=backupdir)
323 323 os.close(fd)
324 324 ui.debug('backup %r as %r\n' % (f, tmpname))
325 325 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
326 326 backups[f] = tmpname
327 327
328 328 fp = stringio()
329 329 for c in chunks:
330 330 fname = c.filename()
331 331 if fname in backups:
332 332 c.write(fp)
333 333 dopatch = fp.tell()
334 334 fp.seek(0)
335 335
336 336 # 2.5 optionally review / modify patch in text editor
337 337 if opts.get('review', False):
338 338 patchtext = (crecordmod.diffhelptext
339 339 + crecordmod.patchhelptext
340 340 + fp.read())
341 341 reviewedpatch = ui.edit(patchtext, "",
342 342 extra={"suffix": ".diff"},
343 343 repopath=repo.path)
344 344 fp.truncate(0)
345 345 fp.write(reviewedpatch)
346 346 fp.seek(0)
347 347
348 348 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
349 349 # 3a. apply filtered patch to clean repo (clean)
350 350 if backups:
351 351 # Equivalent to hg.revert
352 352 m = scmutil.matchfiles(repo, backups.keys())
353 353 mergemod.update(repo, repo.dirstate.p1(),
354 354 False, True, matcher=m)
355 355
356 356 # 3b. (apply)
357 357 if dopatch:
358 358 try:
359 359 ui.debug('applying patch\n')
360 360 ui.debug(fp.getvalue())
361 361 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
362 362 except patch.PatchError as err:
363 363 raise error.Abort(str(err))
364 364 del fp
365 365
366 366 # 4. We prepared working directory according to filtered
367 367 # patch. Now is the time to delegate the job to
368 368 # commit/qrefresh or the like!
369 369
370 370 # Make all of the pathnames absolute.
371 371 newfiles = [repo.wjoin(nf) for nf in newfiles]
372 372 return commitfunc(ui, repo, *newfiles, **opts)
373 373 finally:
374 374 # 5. finally restore backed-up files
375 375 try:
376 376 dirstate = repo.dirstate
377 377 for realname, tmpname in backups.iteritems():
378 378 ui.debug('restoring %r to %r\n' % (tmpname, realname))
379 379
380 380 if dirstate[realname] == 'n':
381 381 # without normallookup, restoring timestamp
382 382 # may cause partially committed files
383 383 # to be treated as unmodified
384 384 dirstate.normallookup(realname)
385 385
386 386 # copystat=True here and above is a hack to trick any editors
387 387 # that have f open into believing we haven't modified them.
388 388 #
389 389 # Also note that this is racy, as an editor could notice the
390 390 # file's mtime before we've finished writing it.
391 391 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
392 392 os.unlink(tmpname)
393 393 if tobackup:
394 394 os.rmdir(backupdir)
395 395 except OSError:
396 396 pass
397 397
398 398 def recordinwlock(ui, repo, message, match, opts):
399 399 with repo.wlock():
400 400 return recordfunc(ui, repo, message, match, opts)
401 401
402 402 return commit(ui, repo, recordinwlock, pats, opts)
403 403
404 404 def tersestatus(root, statlist, status, ignorefn, ignore):
405 405 """
406 406 Returns a list of statuses with directories collapsed if all the files in
407 407 a directory have the same status.
408 408 """
409 409
410 410 def numfiles(dirname):
411 411 """
412 412 Calculates the number of tracked files in a given directory, including
413 413 files which were removed or deleted. Considers ignored files if the
414 414 ignore argument is True or 'i' is present in the status argument.
415 415 """
416 416 if lencache.get(dirname):
417 417 return lencache[dirname]
418 418 if 'i' in status or ignore:
419 419 def match(localpath):
420 420 absolutepath = os.path.join(root, localpath)
421 421 if os.path.isdir(absolutepath) and isemptydir(absolutepath):
422 422 return True
423 423 return False
424 424 else:
425 425 def match(localpath):
426 426 # there can be a directory all of whose files are ignored, and
427 427 # hence the directory should also be ignored while counting the
428 428 # number of files or subdirs in its parent directory. This
429 429 # checks for that.
430 430 # XXX: We need a better logic here.
431 431 if os.path.isdir(os.path.join(root, localpath)):
432 432 return isignoreddir(localpath)
433 433 else:
434 434 # XXX: there can be files which have the ignored pattern but
435 435 # are not ignored. That leads to a bug in counting the number of
436 436 # tracked files in the directory.
437 437 return ignorefn(localpath)
438 438 lendir = 0
439 439 abspath = os.path.join(root, dirname)
440 440 # There might be cases when a directory does not exist, as the whole
441 441 # directory can be removed and/or deleted.
442 442 try:
443 443 for f in os.listdir(abspath):
444 444 localpath = os.path.join(dirname, f)
445 445 if not match(localpath):
446 446 lendir += 1
447 447 except OSError:
448 448 pass
449 449 lendir += len(absentdir.get(dirname, []))
450 450 lencache[dirname] = lendir
451 451 return lendir
452 452
453 453 def isemptydir(abspath):
454 454 """
455 455 Check whether a directory is empty, i.e. there are no files in the
456 456 directory or in any of its subdirectories.
457 457 """
458 458 for f in os.listdir(abspath):
459 459 fullpath = os.path.join(abspath, f)
460 460 if os.path.isdir(fullpath):
461 461 # recursion here
462 462 ret = isemptydir(fullpath)
463 463 if not ret:
464 464 return False
465 465 else:
466 466 return False
467 467 return True
468 468
469 469 def isignoreddir(localpath):
470 470 """
471 471 This function checks whether the directory contains only ignored files,
472 472 and hence whether the directory itself should be considered ignored.
473 473 Returns True if it should be ignored, otherwise False.
474 474 """
475 475 dirpath = os.path.join(root, localpath)
476 476 for f in os.listdir(dirpath):
477 477 filepath = os.path.join(dirpath, f)
478 478 if os.path.isdir(filepath):
479 479 # recursion here
480 480 ret = isignoreddir(os.path.join(localpath, f))
481 481 if not ret:
482 482 return False
483 483 else:
484 484 if not ignorefn(os.path.join(localpath, f)):
485 485 return False
486 486 return True
487 487
488 488 def absentones(removedfiles, missingfiles):
489 489 """
490 490 Returns a dictionary mapping directories to the files in them which are
491 491 either removed or missing (deleted).
492 492 """
493 493 absentdir = {}
494 494 absentfiles = removedfiles + missingfiles
495 495 while absentfiles:
496 496 f = absentfiles.pop()
497 497 par = os.path.dirname(f)
498 498 if par == '':
499 499 continue
500 500 # we need to store files rather than the number of files, as some
501 501 # files or subdirectories in a directory can be counted twice. This
502 502 # is also why we have used sets here.
503 503 try:
504 504 absentdir[par].add(f)
505 505 except KeyError:
506 506 absentdir[par] = set([f])
507 507 absentfiles.append(par)
508 508 return absentdir
509 509
510 510 indexes = {'m': 0, 'a': 1, 'r': 2, 'd': 3, 'u': 4, 'i': 5, 'c': 6}
511 511 # get a dictionary of directories and files which are missing, as os.listdir()
512 512 # won't be able to list them.
513 513 absentdir = absentones(statlist[2], statlist[3])
514 514 finalrs = [[]] * len(indexes)
515 515 didsomethingchanged = False
516 516 # dictionary to store the number of files and subdirs in a directory so that we
517 517 # don't compute that again.
518 518 lencache = {}
519 519
520 520 for st in pycompat.bytestr(status):
521 521
522 522 try:
523 523 ind = indexes[st]
524 524 except KeyError:
525 525 # TODO: Need a better error message here
526 526 raise error.Abort("'%s' not recognized" % st)
527 527
528 528 sfiles = statlist[ind]
529 529 if not sfiles:
530 530 continue
531 531 pardict = {}
532 532 for a in sfiles:
533 533 par = os.path.dirname(a)
534 534 pardict.setdefault(par, []).append(a)
535 535
536 536 rs = []
537 537 newls = []
538 538 for par, files in pardict.iteritems():
539 539 lenpar = numfiles(par)
540 540 if lenpar == len(files):
541 541 newls.append(par)
542 542
543 543 if not newls:
544 544 continue
545 545
546 546 while newls:
547 547 newel = newls.pop()
548 548 if newel == '':
549 549 continue
550 550 parn = os.path.dirname(newel)
551 551 pardict[newel] = []
552 552 # Adding pycompat.ossep as newel is a directory.
553 553 pardict.setdefault(parn, []).append(newel + pycompat.ossep)
554 554 lenpar = numfiles(parn)
555 555 if lenpar == len(pardict[parn]):
556 556 newls.append(parn)
557 557
558 558 # dict.values() for Py3 compatibility
559 559 for files in pardict.values():
560 560 rs.extend(files)
561 561
562 562 rs.sort()
563 563 finalrs[ind] = rs
564 564 didsomethingchanged = True
565 565
566 566 # If nothing is changed, make sure the order of files is preserved.
567 567 if not didsomethingchanged:
568 568 return statlist
569 569
570 570 for x in xrange(len(indexes)):
571 571 if not finalrs[x]:
572 572 finalrs[x] = statlist[x]
573 573
574 574 return finalrs
575 575
576 576 def findpossible(cmd, table, strict=False):
577 577 """
578 578 Return cmd -> (aliases, command table entry)
579 579 for each matching command.
580 580 Return debug commands (or their aliases) only if no normal command matches.
581 581 """
582 582 choice = {}
583 583 debugchoice = {}
584 584
585 585 if cmd in table:
586 586 # short-circuit exact matches, "log" alias beats "^log|history"
587 587 keys = [cmd]
588 588 else:
589 589 keys = table.keys()
590 590
591 591 allcmds = []
592 592 for e in keys:
593 593 aliases = parsealiases(e)
594 594 allcmds.extend(aliases)
595 595 found = None
596 596 if cmd in aliases:
597 597 found = cmd
598 598 elif not strict:
599 599 for a in aliases:
600 600 if a.startswith(cmd):
601 601 found = a
602 602 break
603 603 if found is not None:
604 604 if aliases[0].startswith("debug") or found.startswith("debug"):
605 605 debugchoice[found] = (aliases, table[e])
606 606 else:
607 607 choice[found] = (aliases, table[e])
608 608
609 609 if not choice and debugchoice:
610 610 choice = debugchoice
611 611
612 612 return choice, allcmds
613 613
614 614 def findcmd(cmd, table, strict=True):
615 615 """Return (aliases, command table entry) for command string."""
616 616 choice, allcmds = findpossible(cmd, table, strict)
617 617
618 618 if cmd in choice:
619 619 return choice[cmd]
620 620
621 621 if len(choice) > 1:
622 622 clist = sorted(choice)
623 623 raise error.AmbiguousCommand(cmd, clist)
624 624
625 625 if choice:
626 626 return list(choice.values())[0]
627 627
628 628 raise error.UnknownCommand(cmd, allcmds)
629 629
630 630 def findrepo(p):
631 631 while not os.path.isdir(os.path.join(p, ".hg")):
632 632 oldp, p = p, os.path.dirname(p)
633 633 if p == oldp:
634 634 return None
635 635
636 636 return p
637 637
638 638 def bailifchanged(repo, merge=True, hint=None):
639 639 """ enforce the precondition that working directory must be clean.
640 640
641 641 'merge' can be set to false if a pending uncommitted merge should be
642 642 ignored (such as when 'update --check' runs).
643 643
644 644 'hint' is the usual hint given to Abort exception.
645 645 """
646 646
647 647 if merge and repo.dirstate.p2() != nullid:
648 648 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
649 649 modified, added, removed, deleted = repo.status()[:4]
650 650 if modified or added or removed or deleted:
651 651 raise error.Abort(_('uncommitted changes'), hint=hint)
652 652 ctx = repo[None]
653 653 for s in sorted(ctx.substate):
654 654 ctx.sub(s).bailifchanged(hint=hint)
655 655
656 656 def logmessage(ui, opts):
657 657 """ get the log message according to -m and -l option """
658 658 message = opts.get('message')
659 659 logfile = opts.get('logfile')
660 660
661 661 if message and logfile:
662 662 raise error.Abort(_('options --message and --logfile are mutually '
663 663 'exclusive'))
664 664 if not message and logfile:
665 665 try:
666 666 if isstdiofilename(logfile):
667 667 message = ui.fin.read()
668 668 else:
669 669 message = '\n'.join(util.readfile(logfile).splitlines())
670 670 except IOError as inst:
671 671 raise error.Abort(_("can't read commit message '%s': %s") %
672 672 (logfile, inst.strerror))
673 673 return message
674 674
675 675 def mergeeditform(ctxorbool, baseformname):
676 676 """return appropriate editform name (referencing a committemplate)
677 677
678 678 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
679 679 a merge is being committed.
680 680
681 681 This returns baseformname with '.merge' appended if it is a merge,
682 682 otherwise '.normal' is appended.
683 683 """
684 684 if isinstance(ctxorbool, bool):
685 685 if ctxorbool:
686 686 return baseformname + ".merge"
687 687 elif 1 < len(ctxorbool.parents()):
688 688 return baseformname + ".merge"
689 689
690 690 return baseformname + ".normal"
691 691
692 692 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
693 693 editform='', **opts):
694 694 """get appropriate commit message editor according to '--edit' option
695 695
696 696 'finishdesc' is a function to be called with the edited commit message
697 697 (= 'description' of the new changeset) just after editing, but
698 698 before checking emptiness. It should return the actual text to be
699 699 stored into history. This allows changing the description before
700 700 storing.
701 701
702 702 'extramsg' is an extra message to be shown in the editor instead of
703 703 the 'Leave message empty to abort commit' line. The 'HG: ' prefix and
704 704 EOL are automatically added.
705 705
706 706 'editform' is a dot-separated list of names, to distinguish
707 707 the purpose of commit text editing.
708 708
709 709 'getcommiteditor' returns 'commitforceeditor' regardless of
710 710 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
711 711 they are specific to usage in MQ.
712 712 """
713 713 if edit or finishdesc or extramsg:
714 714 return lambda r, c, s: commitforceeditor(r, c, s,
715 715 finishdesc=finishdesc,
716 716 extramsg=extramsg,
717 717 editform=editform)
718 718 elif editform:
719 719 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
720 720 else:
721 721 return commiteditor
722 722
723 723 def loglimit(opts):
724 724 """get the log limit according to option -l/--limit"""
725 725 limit = opts.get('limit')
726 726 if limit:
727 727 try:
728 728 limit = int(limit)
729 729 except ValueError:
730 730 raise error.Abort(_('limit must be a positive integer'))
731 731 if limit <= 0:
732 732 raise error.Abort(_('limit must be positive'))
733 733 else:
734 734 limit = None
735 735 return limit
736 736
737 737 def makefilename(repo, pat, node, desc=None,
738 738 total=None, seqno=None, revwidth=None, pathname=None):
739 739 node_expander = {
740 740 'H': lambda: hex(node),
741 741 'R': lambda: str(repo.changelog.rev(node)),
742 742 'h': lambda: short(node),
743 743 'm': lambda: re.sub('[^\w]', '_', str(desc))
744 744 }
745 745 expander = {
746 746 '%': lambda: '%',
747 747 'b': lambda: os.path.basename(repo.root),
748 748 }
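# Illustrative example (hypothetical values): with pat = '%b-r%R.patch',
# a repository rooted at /home/user/myrepo, and node at revision 42, the
# expansion below produces 'myrepo-r42.patch'.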
749 749
750 750 try:
751 751 if node:
752 752 expander.update(node_expander)
753 753 if node:
754 754 expander['r'] = (lambda:
755 755 str(repo.changelog.rev(node)).zfill(revwidth or 0))
756 756 if total is not None:
757 757 expander['N'] = lambda: str(total)
758 758 if seqno is not None:
759 759 expander['n'] = lambda: str(seqno)
760 760 if total is not None and seqno is not None:
761 761 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
762 762 if pathname is not None:
763 763 expander['s'] = lambda: os.path.basename(pathname)
764 764 expander['d'] = lambda: os.path.dirname(pathname) or '.'
765 765 expander['p'] = lambda: pathname
766 766
767 767 newname = []
768 768 patlen = len(pat)
769 769 i = 0
770 770 while i < patlen:
771 771 c = pat[i:i + 1]
772 772 if c == '%':
773 773 i += 1
774 774 c = pat[i:i + 1]
775 775 c = expander[c]()
776 776 newname.append(c)
777 777 i += 1
778 778 return ''.join(newname)
779 779 except KeyError as inst:
780 780 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
781 781 inst.args[0])
782 782
783 783 def isstdiofilename(pat):
784 784 """True if the given pat looks like a filename denoting stdin/stdout"""
785 785 return not pat or pat == '-'
786 786
787 787 class _unclosablefile(object):
788 788 def __init__(self, fp):
789 789 self._fp = fp
790 790
791 791 def close(self):
792 792 pass
793 793
794 794 def __iter__(self):
795 795 return iter(self._fp)
796 796
797 797 def __getattr__(self, attr):
798 798 return getattr(self._fp, attr)
799 799
800 800 def __enter__(self):
801 801 return self
802 802
803 803 def __exit__(self, exc_type, exc_value, exc_tb):
804 804 pass
805 805
806 806 def makefileobj(repo, pat, node=None, desc=None, total=None,
807 807 seqno=None, revwidth=None, mode='wb', modemap=None,
808 808 pathname=None):
809 809
810 810 writable = mode not in ('r', 'rb')
811 811
812 812 if isstdiofilename(pat):
813 813 if writable:
814 814 fp = repo.ui.fout
815 815 else:
816 816 fp = repo.ui.fin
817 817 return _unclosablefile(fp)
818 818 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
819 819 if modemap is not None:
820 820 mode = modemap.get(fn, mode)
821 821 if mode == 'wb':
822 822 modemap[fn] = 'ab'
823 823 return open(fn, mode)
824 824
825 825 def openrevlog(repo, cmd, file_, opts):
826 826 """opens the changelog, manifest, a filelog or a given revlog"""
827 827 cl = opts['changelog']
828 828 mf = opts['manifest']
829 829 dir = opts['dir']
830 830 msg = None
831 831 if cl and mf:
832 832 msg = _('cannot specify --changelog and --manifest at the same time')
833 833 elif cl and dir:
834 834 msg = _('cannot specify --changelog and --dir at the same time')
835 835 elif cl or mf or dir:
836 836 if file_:
837 837 msg = _('cannot specify filename with --changelog or --manifest')
838 838 elif not repo:
839 839 msg = _('cannot specify --changelog or --manifest or --dir '
840 840 'without a repository')
841 841 if msg:
842 842 raise error.Abort(msg)
843 843
844 844 r = None
845 845 if repo:
846 846 if cl:
847 847 r = repo.unfiltered().changelog
848 848 elif dir:
849 849 if 'treemanifest' not in repo.requirements:
850 850 raise error.Abort(_("--dir can only be used on repos with "
851 851 "treemanifest enabled"))
852 852 dirlog = repo.manifestlog._revlog.dirlog(dir)
853 853 if len(dirlog):
854 854 r = dirlog
855 855 elif mf:
856 856 r = repo.manifestlog._revlog
857 857 elif file_:
858 858 filelog = repo.file(file_)
859 859 if len(filelog):
860 860 r = filelog
861 861 if not r:
862 862 if not file_:
863 863 raise error.CommandError(cmd, _('invalid arguments'))
864 864 if not os.path.isfile(file_):
865 865 raise error.Abort(_("revlog '%s' not found") % file_)
866 866 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
867 867 file_[:-2] + ".i")
868 868 return r
869 869
870 870 def copy(ui, repo, pats, opts, rename=False):
871 871 # called with the repo lock held
872 872 #
873 873 # hgsep => pathname that uses "/" to separate directories
874 874 # ossep => pathname that uses os.sep to separate directories
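# e.g. the hgsep path 'a/b/c' corresponds to the ossep path 'a\b\c'
# on Windows, and to 'a/b/c' elsewhere.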
875 875 cwd = repo.getcwd()
876 876 targets = {}
877 877 after = opts.get("after")
878 878 dryrun = opts.get("dry_run")
879 879 wctx = repo[None]
880 880
881 881 def walkpat(pat):
882 882 srcs = []
883 883 if after:
884 884 badstates = '?'
885 885 else:
886 886 badstates = '?r'
887 887 m = scmutil.match(wctx, [pat], opts, globbed=True)
888 888 for abs in wctx.walk(m):
889 889 state = repo.dirstate[abs]
890 890 rel = m.rel(abs)
891 891 exact = m.exact(abs)
892 892 if state in badstates:
893 893 if exact and state == '?':
894 894 ui.warn(_('%s: not copying - file is not managed\n') % rel)
895 895 if exact and state == 'r':
896 896 ui.warn(_('%s: not copying - file has been marked for'
897 897 ' remove\n') % rel)
898 898 continue
899 899 # abs: hgsep
900 900 # rel: ossep
901 901 srcs.append((abs, rel, exact))
902 902 return srcs
903 903
904 904 # abssrc: hgsep
905 905 # relsrc: ossep
906 906 # otarget: ossep
907 907 def copyfile(abssrc, relsrc, otarget, exact):
908 908 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
909 909 if '/' in abstarget:
910 910 # We cannot normalize abstarget itself; that would prevent
911 911 # case-only renames, like a => A.
912 912 abspath, absname = abstarget.rsplit('/', 1)
913 913 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
914 914 reltarget = repo.pathto(abstarget, cwd)
915 915 target = repo.wjoin(abstarget)
916 916 src = repo.wjoin(abssrc)
917 917 state = repo.dirstate[abstarget]
918 918
919 919 scmutil.checkportable(ui, abstarget)
920 920
921 921 # check for collisions
922 922 prevsrc = targets.get(abstarget)
923 923 if prevsrc is not None:
924 924 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
925 925 (reltarget, repo.pathto(abssrc, cwd),
926 926 repo.pathto(prevsrc, cwd)))
927 927 return
928 928
929 929 # check for overwrites
930 930 exists = os.path.lexists(target)
931 931 samefile = False
932 932 if exists and abssrc != abstarget:
933 933 if (repo.dirstate.normalize(abssrc) ==
934 934 repo.dirstate.normalize(abstarget)):
935 935 if not rename:
936 936 ui.warn(_("%s: can't copy - same file\n") % reltarget)
937 937 return
938 938 exists = False
939 939 samefile = True
940 940
941 941 if (not after and exists) or (after and state in 'mn'):
942 942 if not opts['force']:
943 943 if state in 'mn':
944 944 msg = _('%s: not overwriting - file already committed\n')
945 945 if after:
946 946 flags = '--after --force'
947 947 else:
948 948 flags = '--force'
949 949 if rename:
950 950 hint = _('(hg rename %s to replace the file by '
951 951 'recording a rename)\n') % flags
952 952 else:
953 953 hint = _('(hg copy %s to replace the file by '
954 954 'recording a copy)\n') % flags
955 955 else:
956 956 msg = _('%s: not overwriting - file exists\n')
957 957 if rename:
958 958 hint = _('(hg rename --after to record the rename)\n')
959 959 else:
960 960 hint = _('(hg copy --after to record the copy)\n')
961 961 ui.warn(msg % reltarget)
962 962 ui.warn(hint)
963 963 return
964 964
965 965 if after:
966 966 if not exists:
967 967 if rename:
968 968 ui.warn(_('%s: not recording move - %s does not exist\n') %
969 969 (relsrc, reltarget))
970 970 else:
971 971 ui.warn(_('%s: not recording copy - %s does not exist\n') %
972 972 (relsrc, reltarget))
973 973 return
974 974 elif not dryrun:
975 975 try:
976 976 if exists:
977 977 os.unlink(target)
978 978 targetdir = os.path.dirname(target) or '.'
979 979 if not os.path.isdir(targetdir):
980 980 os.makedirs(targetdir)
981 981 if samefile:
982 982 tmp = target + "~hgrename"
983 983 os.rename(src, tmp)
984 984 os.rename(tmp, target)
985 985 else:
986 986 util.copyfile(src, target)
987 987 srcexists = True
988 988 except IOError as inst:
989 989 if inst.errno == errno.ENOENT:
990 990 ui.warn(_('%s: deleted in working directory\n') % relsrc)
991 991 srcexists = False
992 992 else:
993 993 ui.warn(_('%s: cannot copy - %s\n') %
994 994 (relsrc, inst.strerror))
995 995 return True # report a failure
996 996
997 997 if ui.verbose or not exact:
998 998 if rename:
999 999 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1000 1000 else:
1001 1001 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1002 1002
1003 1003 targets[abstarget] = abssrc
1004 1004
1005 1005 # fix up dirstate
1006 1006 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1007 1007 dryrun=dryrun, cwd=cwd)
1008 1008 if rename and not dryrun:
1009 1009 if not after and srcexists and not samefile:
1010 1010 repo.wvfs.unlinkpath(abssrc)
1011 1011 wctx.forget([abssrc])
1012 1012
1013 1013 # pat: ossep
1014 1014 # dest: ossep
1015 1015 # srcs: list of (hgsep, hgsep, ossep, bool)
1016 1016 # return: function that takes hgsep and returns ossep
1017 1017 def targetpathfn(pat, dest, srcs):
1018 1018 if os.path.isdir(pat):
1019 1019 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1020 1020 abspfx = util.localpath(abspfx)
1021 1021 if destdirexists:
1022 1022 striplen = len(os.path.split(abspfx)[0])
1023 1023 else:
1024 1024 striplen = len(abspfx)
1025 1025 if striplen:
1026 1026 striplen += len(pycompat.ossep)
1027 1027 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1028 1028 elif destdirexists:
1029 1029 res = lambda p: os.path.join(dest,
1030 1030 os.path.basename(util.localpath(p)))
1031 1031 else:
1032 1032 res = lambda p: dest
1033 1033 return res
1034 1034
1035 1035 # pat: ossep
1036 1036 # dest: ossep
1037 1037 # srcs: list of (hgsep, hgsep, ossep, bool)
1038 1038 # return: function that takes hgsep and returns ossep
1039 1039 def targetpathafterfn(pat, dest, srcs):
1040 1040 if matchmod.patkind(pat):
1041 1041 # a mercurial pattern
1042 1042 res = lambda p: os.path.join(dest,
1043 1043 os.path.basename(util.localpath(p)))
1044 1044 else:
1045 1045 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1046 1046 if len(abspfx) < len(srcs[0][0]):
1047 1047 # A directory. Either the target path contains the last
1048 1048 # component of the source path or it does not.
1049 1049 def evalpath(striplen):
1050 1050 score = 0
1051 1051 for s in srcs:
1052 1052 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1053 1053 if os.path.lexists(t):
1054 1054 score += 1
1055 1055 return score
1056 1056
1057 1057 abspfx = util.localpath(abspfx)
1058 1058 striplen = len(abspfx)
1059 1059 if striplen:
1060 1060 striplen += len(pycompat.ossep)
1061 1061 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1062 1062 score = evalpath(striplen)
1063 1063 striplen1 = len(os.path.split(abspfx)[0])
1064 1064 if striplen1:
1065 1065 striplen1 += len(pycompat.ossep)
1066 1066 if evalpath(striplen1) > score:
1067 1067 striplen = striplen1
1068 1068 res = lambda p: os.path.join(dest,
1069 1069 util.localpath(p)[striplen:])
1070 1070 else:
1071 1071 # a file
1072 1072 if destdirexists:
1073 1073 res = lambda p: os.path.join(dest,
1074 1074 os.path.basename(util.localpath(p)))
1075 1075 else:
1076 1076 res = lambda p: dest
1077 1077 return res
1078 1078
1079 1079 pats = scmutil.expandpats(pats)
1080 1080 if not pats:
1081 1081 raise error.Abort(_('no source or destination specified'))
1082 1082 if len(pats) == 1:
1083 1083 raise error.Abort(_('no destination specified'))
1084 1084 dest = pats.pop()
1085 1085 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1086 1086 if not destdirexists:
1087 1087 if len(pats) > 1 or matchmod.patkind(pats[0]):
1088 1088 raise error.Abort(_('with multiple sources, destination must be an '
1089 1089 'existing directory'))
1090 1090 if util.endswithsep(dest):
1091 1091 raise error.Abort(_('destination %s is not a directory') % dest)
1092 1092
1093 1093 tfn = targetpathfn
1094 1094 if after:
1095 1095 tfn = targetpathafterfn
1096 1096 copylist = []
1097 1097 for pat in pats:
1098 1098 srcs = walkpat(pat)
1099 1099 if not srcs:
1100 1100 continue
1101 1101 copylist.append((tfn(pat, dest, srcs), srcs))
1102 1102 if not copylist:
1103 1103 raise error.Abort(_('no files to copy'))
1104 1104
1105 1105 errors = 0
1106 1106 for targetpath, srcs in copylist:
1107 1107 for abssrc, relsrc, exact in srcs:
1108 1108 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1109 1109 errors += 1
1110 1110
1111 1111 if errors:
1112 1112 ui.warn(_('(consider using --after)\n'))
1113 1113
1114 1114 return errors != 0
1115 1115
1116 1116 ## facility to let extensions process additional data into an import patch
1117 1117 # list of identifiers to be executed in order
1118 1118 extrapreimport = [] # run before commit
1119 1119 extrapostimport = [] # run after commit
1120 1120 # mapping from identifier to actual import function
1121 1121 #
1122 1122 # 'preimport' hooks are run before the commit is made and are provided the following
1123 1123 # arguments:
1124 1124 # - repo: the localrepository instance,
1125 1125 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1126 1126 # - extra: the future extra dictionary of the changeset, please mutate it,
1127 1127 # - opts: the import options.
1128 1128 # XXX ideally, we would just pass a ctx ready to be committed; that would allow
1129 1129 # mutation of the in-memory commit and more. Feel free to rework the code to get
1130 1130 # there.
1131 1131 extrapreimportmap = {}
1132 1132 # 'postimport' hooks are run after the commit is made and are provided the following
1133 1133 # argument:
1134 1134 # - ctx: the changectx created by import.
1135 1135 extrapostimportmap = {}
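# Illustrative sketch (hypothetical extension code, not part of the module):
# registering a 'preimport' hook that copies the patch's node id into the
# changeset extras; the identifier 'srcnode' is made up for this example.
#
#     def srcnodehook(repo, patchdata, extra, opts):
#         if patchdata.get('nodeid'):
#             extra['source'] = patchdata['nodeid']
#
#     cmdutil.extrapreimport.append('srcnode')
#     cmdutil.extrapreimportmap['srcnode'] = srcnodehook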
1136 1136
1137 1137 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1138 1138 """Utility function used by commands.import to import a single patch
1139 1139
1140 1140 This function is explicitly defined here to help the evolve extension to
1141 1141 wrap this part of the import logic.
1142 1142
1143 1143 The API is currently a bit ugly because it is a simple code translation from
1144 1144 the import command. Feel free to make it better.
1145 1145
1146 1146 :hunk: a patch (as a binary string)
1147 1147 :parents: nodes that will be parent of the created commit
1148 1148 :opts: the full dict of options passed to the import command
1149 1149 :msgs: list to save commit message to.
1150 1150 (used in case we need to save it when failing)
1151 1151 :updatefunc: a function that updates a repo to a given node
1152 1152 updatefunc(<repo>, <node>)
1153 1153 """
1154 1154 # avoid cycle context -> subrepo -> cmdutil
1155 1155 from . import context
1156 1156 extractdata = patch.extract(ui, hunk)
1157 1157 tmpname = extractdata.get('filename')
1158 1158 message = extractdata.get('message')
1159 1159 user = opts.get('user') or extractdata.get('user')
1160 1160 date = opts.get('date') or extractdata.get('date')
1161 1161 branch = extractdata.get('branch')
1162 1162 nodeid = extractdata.get('nodeid')
1163 1163 p1 = extractdata.get('p1')
1164 1164 p2 = extractdata.get('p2')
1165 1165
1166 1166 nocommit = opts.get('no_commit')
1167 1167 importbranch = opts.get('import_branch')
1168 1168 update = not opts.get('bypass')
1169 1169 strip = opts["strip"]
1170 1170 prefix = opts["prefix"]
1171 1171 sim = float(opts.get('similarity') or 0)
1172 1172 if not tmpname:
1173 1173 return (None, None, False)
1174 1174
1175 1175 rejects = False
1176 1176
1177 1177 try:
1178 1178 cmdline_message = logmessage(ui, opts)
1179 1179 if cmdline_message:
1180 1180 # pick up the cmdline msg
1181 1181 message = cmdline_message
1182 1182 elif message:
1183 1183 # pick up the patch msg
1184 1184 message = message.strip()
1185 1185 else:
1186 1186 # launch the editor
1187 1187 message = None
1188 1188 ui.debug('message:\n%s\n' % message)
1189 1189
1190 1190 if len(parents) == 1:
1191 1191 parents.append(repo[nullid])
1192 1192 if opts.get('exact'):
1193 1193 if not nodeid or not p1:
1194 1194 raise error.Abort(_('not a Mercurial patch'))
1195 1195 p1 = repo[p1]
1196 1196 p2 = repo[p2 or nullid]
1197 1197 elif p2:
1198 1198 try:
1199 1199 p1 = repo[p1]
1200 1200 p2 = repo[p2]
1201 1201 # Without any options, consider p2 only if the
1202 1202 # patch is being applied on top of the recorded
1203 1203 # first parent.
1204 1204 if p1 != parents[0]:
1205 1205 p1 = parents[0]
1206 1206 p2 = repo[nullid]
1207 1207 except error.RepoError:
1208 1208 p1, p2 = parents
1209 1209 if p2.node() == nullid:
1210 1210 ui.warn(_("warning: importing the patch as a normal revision\n"
1211 1211 "(use --exact to import the patch as a merge)\n"))
1212 1212 else:
1213 1213 p1, p2 = parents
1214 1214
1215 1215 n = None
1216 1216 if update:
1217 1217 if p1 != parents[0]:
1218 1218 updatefunc(repo, p1.node())
1219 1219 if p2 != parents[1]:
1220 1220 repo.setparents(p1.node(), p2.node())
1221 1221
1222 1222 if opts.get('exact') or importbranch:
1223 1223 repo.dirstate.setbranch(branch or 'default')
1224 1224
1225 1225 partial = opts.get('partial', False)
1226 1226 files = set()
1227 1227 try:
1228 1228 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1229 1229 files=files, eolmode=None, similarity=sim / 100.0)
1230 1230 except patch.PatchError as e:
1231 1231 if not partial:
1232 1232 raise error.Abort(str(e))
1233 1233 if partial:
1234 1234 rejects = True
1235 1235
1236 1236 files = list(files)
1237 1237 if nocommit:
1238 1238 if message:
1239 1239 msgs.append(message)
1240 1240 else:
1241 1241 if opts.get('exact') or p2:
1242 1242 # If you got here, you either used --force and know what
1243 1243 # you are doing, or used --exact or a merge patch while
1244 1244 # being updated to its first parent.
1245 1245 m = None
1246 1246 else:
1247 1247 m = scmutil.matchfiles(repo, files or [])
1248 1248 editform = mergeeditform(repo[None], 'import.normal')
1249 1249 if opts.get('exact'):
1250 1250 editor = None
1251 1251 else:
1252 1252 editor = getcommiteditor(editform=editform, **opts)
1253 1253 extra = {}
1254 1254 for idfunc in extrapreimport:
1255 1255 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1256 1256 overrides = {}
1257 1257 if partial:
1258 1258 overrides[('ui', 'allowemptycommit')] = True
1259 1259 with repo.ui.configoverride(overrides, 'import'):
1260 1260 n = repo.commit(message, user,
1261 1261 date, match=m,
1262 1262 editor=editor, extra=extra)
1263 1263 for idfunc in extrapostimport:
1264 1264 extrapostimportmap[idfunc](repo[n])
1265 1265 else:
1266 1266 if opts.get('exact') or importbranch:
1267 1267 branch = branch or 'default'
1268 1268 else:
1269 1269 branch = p1.branch()
1270 1270 store = patch.filestore()
1271 1271 try:
1272 1272 files = set()
1273 1273 try:
1274 1274 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1275 1275 files, eolmode=None)
1276 1276 except patch.PatchError as e:
1277 1277 raise error.Abort(str(e))
1278 1278 if opts.get('exact'):
1279 1279 editor = None
1280 1280 else:
1281 1281 editor = getcommiteditor(editform='import.bypass')
1282 1282 memctx = context.memctx(repo, (p1.node(), p2.node()),
1283 1283 message,
1284 1284 files=files,
1285 1285 filectxfn=store,
1286 1286 user=user,
1287 1287 date=date,
1288 1288 branch=branch,
1289 1289 editor=editor)
1290 1290 n = memctx.commit()
1291 1291 finally:
1292 1292 store.close()
1293 1293 if opts.get('exact') and nocommit:
1294 1294 # --exact with --no-commit is still useful in that it does merge
1295 1295 # and branch bits
1296 1296 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1297 1297 elif opts.get('exact') and hex(n) != nodeid:
1298 1298 raise error.Abort(_('patch is damaged or loses information'))
1299 1299 msg = _('applied to working directory')
1300 1300 if n:
1301 1301 # i18n: refers to a short changeset id
1302 1302 msg = _('created %s') % short(n)
1303 1303 return (msg, n, rejects)
1304 1304 finally:
1305 1305 os.unlink(tmpname)
1306 1306
1307 1307 # facility to let extensions include additional data in an exported patch
1308 1308 # list of identifiers to be executed in order
1309 1309 extraexport = []
1310 1310 # mapping from identifier to actual export function
1311 1311 # function has to return a string to be added to the header or None
1312 1312 # it is given two arguments (sequencenumber, changectx)
1313 1313 extraexportmap = {}
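# Illustrative sketch (hypothetical extension code, not part of the module):
# contributing an extra '# ...' header line to exported patches; the
# identifier 'seqinfo' is made up for this example.
#
#     def seqinfoheader(seqno, ctx):
#         if seqno is not None:
#             return 'Sequence %d' % seqno
#         return None   # None means "emit no header line"
#
#     cmdutil.extraexport.append('seqinfo')
#     cmdutil.extraexportmap['seqinfo'] = seqinfoheader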
1314 1314
1315 1315 def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
1316 1316 node = scmutil.binnode(ctx)
1317 1317 parents = [p.node() for p in ctx.parents() if p]
1318 1318 branch = ctx.branch()
1319 1319 if switch_parent:
1320 1320 parents.reverse()
1321 1321
1322 1322 if parents:
1323 1323 prev = parents[0]
1324 1324 else:
1325 1325 prev = nullid
1326 1326
1327 1327 write("# HG changeset patch\n")
1328 1328 write("# User %s\n" % ctx.user())
1329 1329 write("# Date %d %d\n" % ctx.date())
1330 1330 write("# %s\n" % util.datestr(ctx.date()))
1331 1331 if branch and branch != 'default':
1332 1332 write("# Branch %s\n" % branch)
1333 1333 write("# Node ID %s\n" % hex(node))
1334 1334 write("# Parent %s\n" % hex(prev))
1335 1335 if len(parents) > 1:
1336 1336 write("# Parent %s\n" % hex(parents[1]))
1337 1337
1338 1338 for headerid in extraexport:
1339 1339 header = extraexportmap[headerid](seqno, ctx)
1340 1340 if header is not None:
1341 1341 write('# %s\n' % header)
1342 1342 write(ctx.description().rstrip())
1343 1343 write("\n\n")
1344 1344
1345 1345 for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
1346 1346 write(chunk, label=label)
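# For reference, the header block written above has this shape (values are
# placeholders; the Branch line and second Parent line appear only when
# applicable):
#
#     # HG changeset patch
#     # User alice@example.com
#     # Date 1500000000 0
#     # Fri Jul 14 02:40:00 2017 +0000
#     # Branch stable
#     # Node ID <40 hex digits>
#     # Parent <40 hex digits>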
1347 1347
1348 1348 def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
1349 1349 opts=None, match=None):
1350 1350 '''export changesets as hg patches
1351 1351
1352 1352 Args:
1353 1353 repo: The repository from which we're exporting revisions.
1354 1354 revs: A list of revisions to export as revision numbers.
1355 1355 fntemplate: An optional string to use for generating patch file names.
1356 1356 fp: An optional file-like object to which patches should be written.
1357 1357 switch_parent: If True, show diffs against second parent when not nullid.
1358 1358 Default is false, which always shows diff against p1.
1359 1359 opts: diff options to use for generating the patch.
1360 1360 match: If specified, only export changes to files matching this matcher.
1361 1361
1362 1362 Returns:
1363 1363 Nothing.
1364 1364
1365 1365 Side Effect:
1366 1366 "HG Changeset Patch" data is emitted to one of the following
1367 1367 destinations:
1368 1368 fp is specified: All revs are written to the specified
1369 1369 file-like object.
1370 1370 fntemplate specified: Each rev is written to a unique file named using
1371 1371 the given template.
1372 1372 Neither fp nor template specified: All revs written to repo.ui.write()
1373 1373 '''
1374 1374
1375 1375 total = len(revs)
1376 1376 revwidth = max(len(str(rev)) for rev in revs)
1377 1377 filemode = {}
1378 1378
1379 1379 write = None
1380 1380 dest = '<unnamed>'
1381 1381 if fp:
1382 1382 dest = getattr(fp, 'name', dest)
1383 1383 def write(s, **kw):
1384 1384 fp.write(s)
1385 1385 elif not fntemplate:
1386 1386 write = repo.ui.write
1387 1387
1388 1388 for seqno, rev in enumerate(revs, 1):
1389 1389 ctx = repo[rev]
1390 1390 fo = None
1391 1391 if not fp and fntemplate:
1392 1392 desc_lines = ctx.description().rstrip().split('\n')
1393 1393 desc = desc_lines[0] # Commit always has a first line.
1394 1394 fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
1395 1395 total=total, seqno=seqno, revwidth=revwidth,
1396 1396 mode='wb', modemap=filemode)
1397 1397 dest = fo.name
1398 1398 def write(s, **kw):
1399 1399 fo.write(s)
1400 1400 if not dest.startswith('<'):
1401 1401 repo.ui.note("%s\n" % dest)
1402 1402 _exportsingle(
1403 1403 repo, ctx, match, switch_parent, rev, seqno, write, opts)
1404 1404 if fo is not None:
1405 1405 fo.close()
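# Illustrative usage (a sketch, not from this module): writing one patch
# file per revision with the default name template,
#
#     cmdutil.export(repo, [7, 8, 9], fntemplate='hg-%h.patch')
#
# while passing an open binary file as fp= instead streams all three
# patches to that single destination, as the docstring above describes.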
1406 1406
1407 1407 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1408 1408 changes=None, stat=False, fp=None, prefix='',
1409 1409 root='', listsubrepos=False):
1410 1410 '''show diff or diffstat.'''
1411 1411 if fp is None:
1412 1412 write = ui.write
1413 1413 else:
1414 1414 def write(s, **kw):
1415 1415 fp.write(s)
1416 1416
1417 1417 if root:
1418 1418 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1419 1419 else:
1420 1420 relroot = ''
1421 1421 if relroot != '':
1422 1422 # XXX relative roots currently don't work if the root is within a
1423 1423 # subrepo
1424 1424 uirelroot = match.uipath(relroot)
1425 1425 relroot += '/'
1426 1426 for matchroot in match.files():
1427 1427 if not matchroot.startswith(relroot):
1428 1428 ui.warn(_('warning: %s not inside relative root %s\n') % (
1429 1429 match.uipath(matchroot), uirelroot))
1430 1430
1431 1431 if stat:
1432 1432 diffopts = diffopts.copy(context=0)
1433 1433 width = 80
1434 1434 if not ui.plain():
1435 1435 width = ui.termwidth()
1436 1436 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1437 1437 prefix=prefix, relroot=relroot)
1438 1438 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1439 1439 width=width):
1440 1440 write(chunk, label=label)
1441 1441 else:
1442 1442 for chunk, label in patch.diffui(repo, node1, node2, match,
1443 1443 changes, diffopts, prefix=prefix,
1444 1444 relroot=relroot):
1445 1445 write(chunk, label=label)
1446 1446
1447 1447 if listsubrepos:
1448 1448 ctx1 = repo[node1]
1449 1449 ctx2 = repo[node2]
1450 1450 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1451 1451 tempnode2 = node2
1452 1452 try:
1453 1453 if node2 is not None:
1454 1454 tempnode2 = ctx2.substate[subpath][1]
1455 1455 except KeyError:
1456 1456 # A subrepo that existed in node1 was deleted between node1 and
1457 1457 # node2 (inclusive). Thus, ctx2's substate won't contain that
1458 1458 # subpath. The best we can do is to ignore it.
1459 1459 tempnode2 = None
1460 1460 submatch = matchmod.subdirmatcher(subpath, match)
1461 1461 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1462 1462 stat=stat, fp=fp, prefix=prefix)
1463 1463
1464 1464 def _changesetlabels(ctx):
1465 1465 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1466 1466 if ctx.obsolete():
1467 1467 labels.append('changeset.obsolete')
1468 1468 if ctx.troubled():
1469 1469 labels.append('changeset.troubled')
1470 1470 for trouble in ctx.troubles():
1471 1471 labels.append('trouble.%s' % trouble)
1472 1472 return ' '.join(labels)
1473 1473
1474 1474 class changeset_printer(object):
1475 1475 '''show changeset information when templating not requested.'''
1476 1476
1477 1477 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1478 1478 self.ui = ui
1479 1479 self.repo = repo
1480 1480 self.buffered = buffered
1481 1481 self.matchfn = matchfn
1482 1482 self.diffopts = diffopts
1483 1483 self.header = {}
1484 1484 self.hunk = {}
1485 1485 self.lastheader = None
1486 1486 self.footer = None
1487 1487
1488 1488 def flush(self, ctx):
1489 1489 rev = ctx.rev()
1490 1490 if rev in self.header:
1491 1491 h = self.header[rev]
1492 1492 if h != self.lastheader:
1493 1493 self.lastheader = h
1494 1494 self.ui.write(h)
1495 1495 del self.header[rev]
1496 1496 if rev in self.hunk:
1497 1497 self.ui.write(self.hunk[rev])
1498 1498 del self.hunk[rev]
1499 1499 return 1
1500 1500 return 0
1501 1501
1502 1502 def close(self):
1503 1503 if self.footer:
1504 1504 self.ui.write(self.footer)
1505 1505
1506 1506 def show(self, ctx, copies=None, matchfn=None, **props):
1507 1507 props = pycompat.byteskwargs(props)
1508 1508 if self.buffered:
1509 1509 self.ui.pushbuffer(labeled=True)
1510 1510 self._show(ctx, copies, matchfn, props)
1511 1511 self.hunk[ctx.rev()] = self.ui.popbuffer()
1512 1512 else:
1513 1513 self._show(ctx, copies, matchfn, props)
1514 1514
1515 1515 def _show(self, ctx, copies, matchfn, props):
1516 1516 '''show a single changeset or file revision'''
1517 1517 changenode = ctx.node()
1518 1518 rev = ctx.rev()
1519 1519 if self.ui.debugflag:
1520 1520 hexfunc = hex
1521 1521 else:
1522 1522 hexfunc = short
1523 1523 # as of now, wctx.node() and wctx.rev() return None, but we want to
1524 1524 # show the same values as {node} and {rev} templatekw
1525 1525 revnode = (scmutil.intrev(ctx), hexfunc(scmutil.binnode(ctx)))
1526 1526
1527 1527 if self.ui.quiet:
1528 1528 self.ui.write("%d:%s\n" % revnode, label='log.node')
1529 1529 return
1530 1530
1531 1531 date = util.datestr(ctx.date())
1532 1532
1533 1533 # i18n: column positioning for "hg log"
1534 1534 self.ui.write(_("changeset: %d:%s\n") % revnode,
1535 1535 label=_changesetlabels(ctx))
1536 1536
1537 1537 # branches are shown first before any other names due to backwards
1538 1538 # compatibility
1539 1539 branch = ctx.branch()
1540 1540 # don't show the default branch name
1541 1541 if branch != 'default':
1542 1542 # i18n: column positioning for "hg log"
1543 1543 self.ui.write(_("branch: %s\n") % branch,
1544 1544 label='log.branch')
1545 1545
1546 1546 for nsname, ns in self.repo.names.iteritems():
1547 1547 # branches has special logic already handled above, so here we just
1548 1548 # skip it
1549 1549 if nsname == 'branches':
1550 1550 continue
1551 1551 # we will use the templatename as the color name since those two
1552 1552 # should be the same
1553 1553 for name in ns.names(self.repo, changenode):
1554 1554 self.ui.write(ns.logfmt % name,
1555 1555 label='log.%s' % ns.colorname)
1556 1556 if self.ui.debugflag:
1557 1557 # i18n: column positioning for "hg log"
1558 1558 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1559 1559 label='log.phase')
1560 1560 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1561 1561 label = 'log.parent changeset.%s' % pctx.phasestr()
1562 1562 # i18n: column positioning for "hg log"
1563 1563 self.ui.write(_("parent: %d:%s\n")
1564 1564 % (pctx.rev(), hexfunc(pctx.node())),
1565 1565 label=label)
1566 1566
1567 1567 if self.ui.debugflag and rev is not None:
1568 1568 mnode = ctx.manifestnode()
1569 1569 # i18n: column positioning for "hg log"
1570 1570 self.ui.write(_("manifest: %d:%s\n") %
1571 1571 (self.repo.manifestlog._revlog.rev(mnode),
1572 1572 hex(mnode)),
1573 1573 label='ui.debug log.manifest')
1574 1574 # i18n: column positioning for "hg log"
1575 1575 self.ui.write(_("user: %s\n") % ctx.user(),
1576 1576 label='log.user')
1577 1577 # i18n: column positioning for "hg log"
1578 1578 self.ui.write(_("date: %s\n") % date,
1579 1579 label='log.date')
1580 1580
1581 1581 if ctx.troubled():
1582 1582 # i18n: column positioning for "hg log"
1583 1583 self.ui.write(_("trouble: %s\n") % ', '.join(ctx.troubles()),
1584 1584 label='log.trouble')
1585 1585
1586 1586 self._exthook(ctx)
1587 1587
1588 1588 if self.ui.debugflag:
1589 1589 files = ctx.p1().status(ctx)[:3]
1590 1590 for key, value in zip([# i18n: column positioning for "hg log"
1591 1591 _("files:"),
1592 1592 # i18n: column positioning for "hg log"
1593 1593 _("files+:"),
1594 1594 # i18n: column positioning for "hg log"
1595 1595 _("files-:")], files):
1596 1596 if value:
1597 1597 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1598 1598 label='ui.debug log.files')
1599 1599 elif ctx.files() and self.ui.verbose:
1600 1600 # i18n: column positioning for "hg log"
1601 1601 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1602 1602 label='ui.note log.files')
1603 1603 if copies and self.ui.verbose:
1604 1604 copies = ['%s (%s)' % c for c in copies]
1605 1605 # i18n: column positioning for "hg log"
1606 1606 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1607 1607 label='ui.note log.copies')
1608 1608
1609 1609 extra = ctx.extra()
1610 1610 if extra and self.ui.debugflag:
1611 1611 for key, value in sorted(extra.items()):
1612 1612 # i18n: column positioning for "hg log"
1613 1613 self.ui.write(_("extra: %s=%s\n")
1614 1614 % (key, util.escapestr(value)),
1615 1615 label='ui.debug log.extra')
1616 1616
1617 1617 description = ctx.description().strip()
1618 1618 if description:
1619 1619 if self.ui.verbose:
1620 1620 self.ui.write(_("description:\n"),
1621 1621 label='ui.note log.description')
1622 1622 self.ui.write(description,
1623 1623 label='ui.note log.description')
1624 1624 self.ui.write("\n\n")
1625 1625 else:
1626 1626 # i18n: column positioning for "hg log"
1627 1627 self.ui.write(_("summary: %s\n") %
1628 1628 description.splitlines()[0],
1629 1629 label='log.summary')
1630 1630 self.ui.write("\n")
1631 1631
1632 1632 self.showpatch(ctx, matchfn)
1633 1633
1634 1634 def _exthook(self, ctx):
1635 1635 '''empty method used by extensions as a hook point
1636 1636 '''
1637 1637 pass
1638 1638
1639 1639 def showpatch(self, ctx, matchfn):
1640 1640 if not matchfn:
1641 1641 matchfn = self.matchfn
1642 1642 if matchfn:
1643 1643 stat = self.diffopts.get('stat')
1644 1644 diff = self.diffopts.get('patch')
1645 1645 diffopts = patch.diffallopts(self.ui, self.diffopts)
1646 1646 node = ctx.node()
1647 1647 prev = ctx.p1().node()
1648 1648 if stat:
1649 1649 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1650 1650 match=matchfn, stat=True)
1651 1651 if diff:
1652 1652 if stat:
1653 1653 self.ui.write("\n")
1654 1654 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1655 1655 match=matchfn, stat=False)
1656 1656 self.ui.write("\n")
1657 1657
1658 1658 class jsonchangeset(changeset_printer):
1659 1659 '''format changeset information.'''
1660 1660
1661 1661 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1662 1662 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1663 1663 self.cache = {}
1664 1664 self._first = True
1665 1665
1666 1666 def close(self):
1667 1667 if not self._first:
1668 1668 self.ui.write("\n]\n")
1669 1669 else:
1670 1670 self.ui.write("[]\n")
1671 1671
1672 1672 def _show(self, ctx, copies, matchfn, props):
1673 1673 '''show a single changeset or file revision'''
1674 1674 rev = ctx.rev()
1675 1675 if rev is None:
1676 1676 jrev = jnode = 'null'
1677 1677 else:
1678 1678 jrev = '%d' % rev
1679 1679 jnode = '"%s"' % hex(ctx.node())
1680 1680 j = encoding.jsonescape
1681 1681
1682 1682 if self._first:
1683 1683 self.ui.write("[\n {")
1684 1684 self._first = False
1685 1685 else:
1686 1686 self.ui.write(",\n {")
1687 1687
1688 1688 if self.ui.quiet:
1689 1689 self.ui.write(('\n "rev": %s') % jrev)
1690 1690 self.ui.write((',\n "node": %s') % jnode)
1691 1691 self.ui.write('\n }')
1692 1692 return
1693 1693
1694 1694 self.ui.write(('\n "rev": %s') % jrev)
1695 1695 self.ui.write((',\n "node": %s') % jnode)
1696 1696 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1697 1697 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1698 1698 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1699 1699 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1700 1700 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1701 1701
1702 1702 self.ui.write((',\n "bookmarks": [%s]') %
1703 1703 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1704 1704 self.ui.write((',\n "tags": [%s]') %
1705 1705 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1706 1706 self.ui.write((',\n "parents": [%s]') %
1707 1707 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1708 1708
1709 1709 if self.ui.debugflag:
1710 1710 if rev is None:
1711 1711 jmanifestnode = 'null'
1712 1712 else:
1713 1713 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1714 1714 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1715 1715
1716 1716 self.ui.write((',\n "extra": {%s}') %
1717 1717 ", ".join('"%s": "%s"' % (j(k), j(v))
1718 1718 for k, v in ctx.extra().items()))
1719 1719
1720 1720 files = ctx.p1().status(ctx)
1721 1721 self.ui.write((',\n "modified": [%s]') %
1722 1722 ", ".join('"%s"' % j(f) for f in files[0]))
1723 1723 self.ui.write((',\n "added": [%s]') %
1724 1724 ", ".join('"%s"' % j(f) for f in files[1]))
1725 1725 self.ui.write((',\n "removed": [%s]') %
1726 1726 ", ".join('"%s"' % j(f) for f in files[2]))
1727 1727
1728 1728 elif self.ui.verbose:
1729 1729 self.ui.write((',\n "files": [%s]') %
1730 1730 ", ".join('"%s"' % j(f) for f in ctx.files()))
1731 1731
1732 1732 if copies:
1733 1733 self.ui.write((',\n "copies": {%s}') %
1734 1734 ", ".join('"%s": "%s"' % (j(k), j(v))
1735 1735 for k, v in copies))
1736 1736
1737 1737 matchfn = self.matchfn
1738 1738 if matchfn:
1739 1739 stat = self.diffopts.get('stat')
1740 1740 diff = self.diffopts.get('patch')
1741 1741 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1742 1742 node, prev = ctx.node(), ctx.p1().node()
1743 1743 if stat:
1744 1744 self.ui.pushbuffer()
1745 1745 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1746 1746 match=matchfn, stat=True)
1747 1747 self.ui.write((',\n "diffstat": "%s"')
1748 1748 % j(self.ui.popbuffer()))
1749 1749 if diff:
1750 1750 self.ui.pushbuffer()
1751 1751 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1752 1752 match=matchfn, stat=False)
1753 1753 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1754 1754
1755 1755 self.ui.write("\n }")
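# Taken together, _show() and close() above emit one JSON array; in quiet
# mode the stream looks roughly like this (illustrative, abbreviated):
#
#     [
#      {
#       "rev": 0,
#       "node": "<40 hex digits>"
#      },
#      {
#       "rev": 1,
#       "node": "<40 hex digits>"
#      }
#     ]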
1756 1756
1757 1757 class changeset_templater(changeset_printer):
1758 1758 '''format changeset information.'''
1759 1759
1760 1760 # Arguments before "buffered" used to be positional. Consider not
1761 1761 # adding/removing arguments before "buffered" so as not to break callers.
1762 1762 def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
1763 1763 buffered=False):
1764 1764 diffopts = diffopts or {}
1765 1765
1766 1766 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1767 1767 self.t = formatter.loadtemplater(ui, tmplspec,
1768 1768 cache=templatekw.defaulttempl)
1769 1769 self._counter = itertools.count()
1770 1770 self.cache = {}
1771 1771
1772 1772 self._tref = tmplspec.ref
1773 1773 self._parts = {'header': '', 'footer': '',
1774 1774 tmplspec.ref: tmplspec.ref,
1775 1775 'docheader': '', 'docfooter': '',
1776 1776 'separator': ''}
1777 1777 if tmplspec.mapfile:
1778 1778 # find correct templates for current mode, for backward
1779 1779 # compatibility with 'log -v/-q/--debug' using a mapfile
1780 1780 tmplmodes = [
1781 1781 (True, ''),
1782 1782 (self.ui.verbose, '_verbose'),
1783 1783 (self.ui.quiet, '_quiet'),
1784 1784 (self.ui.debugflag, '_debug'),
1785 1785 ]
1786 1786 for mode, postfix in tmplmodes:
1787 1787 for t in self._parts:
1788 1788 cur = t + postfix
1789 1789 if mode and cur in self.t:
1790 1790 self._parts[t] = cur
1791 1791 else:
1792 1792 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
1793 1793 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
1794 1794 self._parts.update(m)
1795 1795
1796 1796 if self._parts['docheader']:
1797 1797 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1798 1798
1799 1799 def close(self):
1800 1800 if self._parts['docfooter']:
1801 1801 if not self.footer:
1802 1802 self.footer = ""
1803 1803 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1804 1804 return super(changeset_templater, self).close()
1805 1805
1806 1806 def _show(self, ctx, copies, matchfn, props):
1807 1807 '''show a single changeset or file revision'''
1808 1808 props = props.copy()
1809 1809 props.update(templatekw.keywords)
1810 1810 props['templ'] = self.t
1811 1811 props['ctx'] = ctx
1812 1812 props['repo'] = self.repo
1813 1813 props['ui'] = self.repo.ui
1814 1814 props['index'] = index = next(self._counter)
1815 1815 props['revcache'] = {'copies': copies}
1816 1816 props['cache'] = self.cache
1817 1817 props = pycompat.strkwargs(props)
1818 1818
1819 1819 # write separator, which wouldn't work well with the header part below
1820 1820 # since there's inherently a conflict between header (across items) and
1821 1821 # separator (per item)
1822 1822 if self._parts['separator'] and index > 0:
1823 1823 self.ui.write(templater.stringify(self.t(self._parts['separator'])))
1824 1824
1825 1825 # write header
1826 1826 if self._parts['header']:
1827 1827 h = templater.stringify(self.t(self._parts['header'], **props))
1828 1828 if self.buffered:
1829 1829 self.header[ctx.rev()] = h
1830 1830 else:
1831 1831 if self.lastheader != h:
1832 1832 self.lastheader = h
1833 1833 self.ui.write(h)
1834 1834
1835 1835 # write changeset metadata, then patch if requested
1836 1836 key = self._parts[self._tref]
1837 1837 self.ui.write(templater.stringify(self.t(key, **props)))
1838 1838 self.showpatch(ctx, matchfn)
1839 1839
1840 1840 if self._parts['footer']:
1841 1841 if not self.footer:
1842 1842 self.footer = templater.stringify(
1843 1843 self.t(self._parts['footer'], **props))
1844 1844
1845 1845 def logtemplatespec(tmpl, mapfile):
1846 1846 if mapfile:
1847 1847 return formatter.templatespec('changeset', tmpl, mapfile)
1848 1848 else:
1849 1849 return formatter.templatespec('', tmpl, None)
1850 1850
1851 1851 def _lookuplogtemplate(ui, tmpl, style):
1852 1852 """Find the template matching the given template spec or style
1853 1853
1854 1854 See formatter.lookuptemplate() for details.
1855 1855 """
1856 1856
1857 1857 # ui settings
1858 1858 if not tmpl and not style: # templates are stronger than style
1859 1859 tmpl = ui.config('ui', 'logtemplate')
1860 1860 if tmpl:
1861 1861 return logtemplatespec(templater.unquotestring(tmpl), None)
1862 1862 else:
1863 1863 style = util.expandpath(ui.config('ui', 'style'))
1864 1864
1865 1865 if not tmpl and style:
1866 1866 mapfile = style
1867 1867 if not os.path.split(mapfile)[0]:
1868 1868 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1869 1869 or templater.templatepath(mapfile))
1870 1870 if mapname:
1871 1871 mapfile = mapname
1872 1872 return logtemplatespec(None, mapfile)
1873 1873
1874 1874 if not tmpl:
1875 1875 return logtemplatespec(None, None)
1876 1876
1877 1877 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1878 1878
1879 1879 def makelogtemplater(ui, repo, tmpl, buffered=False):
1880 1880 """Create a changeset_templater from a literal template 'tmpl'"""
1881 1881 spec = logtemplatespec(tmpl, None)
1882 1882 return changeset_templater(ui, repo, spec, buffered=buffered)
1883 1883
1884 1884 def show_changeset(ui, repo, opts, buffered=False):
1885 1885 """show one changeset using template or regular display.
1886 1886
1887 1887 Display format will be the first non-empty hit of:
1888 1888 1. option 'template'
1889 1889 2. option 'style'
1890 1890 3. [ui] setting 'logtemplate'
1891 1891 4. [ui] setting 'style'
1892 1892 If all of these values are either unset or the empty string,
1893 1893 regular display via changeset_printer() is done.
1894 1894 """
1895 1895 # options
1896 1896 matchfn = None
1897 1897 if opts.get('patch') or opts.get('stat'):
1898 1898 matchfn = scmutil.matchall(repo)
1899 1899
1900 1900 if opts.get('template') == 'json':
1901 1901 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1902 1902
1903 1903 spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
1904 1904
1905 1905 if not spec.ref and not spec.tmpl and not spec.mapfile:
1906 1906 return changeset_printer(ui, repo, matchfn, opts, buffered)
1907 1907
1908 1908 return changeset_templater(ui, repo, spec, matchfn, opts, buffered)
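# Illustrative sketch (configuration, not code from this module): with an
# hgrc entry such as
#
#     [ui]
#     logtemplate = {rev}:{node|short} {desc|firstline}\n
#
# show_changeset() resolves the spec via _lookuplogtemplate() and returns
# a changeset_templater, while --template json short-circuits above to the
# jsonchangeset printer.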
1909 1909
1910 1910 def showmarker(fm, marker, index=None):
1911 1911 """utility function to display obsolescence marker in a readable way
1912 1912
1913 1913 To be used by debug function."""
1914 1914 if index is not None:
1915 1915 fm.write('index', '%i ', index)
1916 1916 fm.write('precnode', '%s ', hex(marker.precnode()))
1917 1917 succs = marker.succnodes()
1918 1918 fm.condwrite(succs, 'succnodes', '%s ',
1919 1919 fm.formatlist(map(hex, succs), name='node'))
1920 1920 fm.write('flag', '%X ', marker.flags())
1921 1921 parents = marker.parentnodes()
1922 1922 if parents is not None:
1923 1923 fm.write('parentnodes', '{%s} ',
1924 1924 fm.formatlist(map(hex, parents), name='node', sep=', '))
1925 1925 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
1926 1926 meta = marker.metadata().copy()
1927 1927 meta.pop('date', None)
1928 1928 fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
1929 1929 fm.plain('\n')
1930 1930
1931 1931 def finddate(ui, repo, date):
1932 1932 """Find the tipmost changeset that matches the given date spec"""
1933 1933
1934 1934 df = util.matchdate(date)
1935 1935 m = scmutil.matchall(repo)
1936 1936 results = {}
1937 1937
1938 1938 def prep(ctx, fns):
1939 1939 d = ctx.date()
1940 1940 if df(d[0]):
1941 1941 results[ctx.rev()] = d
1942 1942
1943 1943 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1944 1944 rev = ctx.rev()
1945 1945 if rev in results:
1946 1946 ui.status(_("found revision %s from %s\n") %
1947 1947 (rev, util.datestr(results[rev])))
1948 1948 return '%d' % rev
1949 1949
1950 1950 raise error.Abort(_("revision matching date not found"))
1951 1951
1952 1952 def increasingwindows(windowsize=8, sizelimit=512):
1953 1953 while True:
1954 1954 yield windowsize
1955 1955 if windowsize < sizelimit:
1956 1956 windowsize *= 2
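# For example, the window sizes yielded above grow geometrically and then
# plateau at the limit:
#
#     >>> import itertools
#     >>> list(itertools.islice(increasingwindows(), 9))
#     [8, 16, 32, 64, 128, 256, 512, 512, 512]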
1957 1957
1958 1958 class FileWalkError(Exception):
1959 1959 pass
1960 1960
1961 1961 def walkfilerevs(repo, match, follow, revs, fncache):
1962 1962 '''Walks the file history for the matched files.
1963 1963
1964 1964 Returns the changeset revs that are involved in the file history.
1965 1965
1966 1966 Throws FileWalkError if the file history can't be walked using
1967 1967 filelogs alone.
1968 1968 '''
1969 1969 wanted = set()
1970 1970 copies = []
1971 1971 minrev, maxrev = min(revs), max(revs)
1972 1972 def filerevgen(filelog, last):
1973 1973 """
1974 1974 Only files, no patterns. Check the history of each file.
1975 1975
1976 1976 Examines filelog entries within the minrev..maxrev linkrev range.
1977 1977 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1978 1978 tuples in backwards order.
1979 1979 """
1980 1980 cl_count = len(repo)
1981 1981 revs = []
1982 1982 for j in xrange(0, last + 1):
1983 1983 linkrev = filelog.linkrev(j)
1984 1984 if linkrev < minrev:
1985 1985 continue
1986 1986 # only yield rev for which we have the changelog, it can
1987 1987 # happen while doing "hg log" during a pull or commit
1988 1988 if linkrev >= cl_count:
1989 1989 break
1990 1990
1991 1991 parentlinkrevs = []
1992 1992 for p in filelog.parentrevs(j):
1993 1993 if p != nullrev:
1994 1994 parentlinkrevs.append(filelog.linkrev(p))
1995 1995 n = filelog.node(j)
1996 1996 revs.append((linkrev, parentlinkrevs,
1997 1997 follow and filelog.renamed(n)))
1998 1998
1999 1999 return reversed(revs)
2000 2000 def iterfiles():
2001 2001 pctx = repo['.']
2002 2002 for filename in match.files():
2003 2003 if follow:
2004 2004 if filename not in pctx:
2005 2005 raise error.Abort(_('cannot follow file not in parent '
2006 2006 'revision: "%s"') % filename)
2007 2007 yield filename, pctx[filename].filenode()
2008 2008 else:
2009 2009 yield filename, None
2010 2010 for filename_node in copies:
2011 2011 yield filename_node
2012 2012
2013 2013 for file_, node in iterfiles():
2014 2014 filelog = repo.file(file_)
2015 2015 if not len(filelog):
2016 2016 if node is None:
2017 2017 # A zero count may be a directory or deleted file, so
2018 2018 # try to find matching entries on the slow path.
2019 2019 if follow:
2020 2020 raise error.Abort(
2021 2021 _('cannot follow nonexistent file: "%s"') % file_)
2022 2022 raise FileWalkError("Cannot walk via filelog")
2023 2023 else:
2024 2024 continue
2025 2025
2026 2026 if node is None:
2027 2027 last = len(filelog) - 1
2028 2028 else:
2029 2029 last = filelog.rev(node)
2030 2030
2031 2031 # keep track of all ancestors of the file
2032 2032 ancestors = {filelog.linkrev(last)}
2033 2033
2034 2034 # iterate from latest to oldest revision
2035 2035 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
2036 2036 if not follow:
2037 2037 if rev > maxrev:
2038 2038 continue
2039 2039 else:
2040 2040 # Note that last might not be the first interesting
2041 2041 # rev to us:
2042 2042 # if the file has been changed after maxrev, we'll
2043 2043 # have linkrev(last) > maxrev, and we still need
2044 2044 # to explore the file graph
2045 2045 if rev not in ancestors:
2046 2046 continue
2047 2047 # XXX insert 1327 fix here
2048 2048 if flparentlinkrevs:
2049 2049 ancestors.update(flparentlinkrevs)
2050 2050
2051 2051 fncache.setdefault(rev, []).append(file_)
2052 2052 wanted.add(rev)
2053 2053 if copied:
2054 2054 copies.append(copied)
2055 2055
2056 2056 return wanted
2057 2057
2058 2058 class _followfilter(object):
2059 2059 def __init__(self, repo, onlyfirst=False):
2060 2060 self.repo = repo
2061 2061 self.startrev = nullrev
2062 2062 self.roots = set()
2063 2063 self.onlyfirst = onlyfirst
2064 2064
2065 2065 def match(self, rev):
2066 2066 def realparents(rev):
2067 2067 if self.onlyfirst:
2068 2068 return self.repo.changelog.parentrevs(rev)[0:1]
2069 2069 else:
2070 2070 return filter(lambda x: x != nullrev,
2071 2071 self.repo.changelog.parentrevs(rev))
2072 2072
2073 2073 if self.startrev == nullrev:
2074 2074 self.startrev = rev
2075 2075 return True
2076 2076
2077 2077 if rev > self.startrev:
2078 2078 # forward: all descendants
2079 2079 if not self.roots:
2080 2080 self.roots.add(self.startrev)
2081 2081 for parent in realparents(rev):
2082 2082 if parent in self.roots:
2083 2083 self.roots.add(rev)
2084 2084 return True
2085 2085 else:
2086 2086 # backwards: all parents
2087 2087 if not self.roots:
2088 2088 self.roots.update(realparents(self.startrev))
2089 2089 if rev in self.roots:
2090 2090 self.roots.remove(rev)
2091 2091 self.roots.update(realparents(rev))
2092 2092 return True
2093 2093
2094 2094 return False
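# Illustrative sketch (not from this module): feeding _followfilter
# increasing revisions keeps the first one plus its descendants, since the
# first match seeds startrev and later revs survive only if a real parent
# is already in the tracked root set:
#
#     ff = _followfilter(repo)
#     kept = [r for r in revs if ff.match(r)]   # revs in increasing order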
2095 2095
2096 2096 def walkchangerevs(repo, match, opts, prepare):
2097 2097 '''Iterate over files and the revs in which they changed.
2098 2098
2099 2099 Callers most commonly need to iterate backwards over the history
2100 2100 in which they are interested. Doing so has awful (quadratic-looking)
2101 2101 performance, so we use iterators in a "windowed" way.
2102 2102
2103 2103 We walk a window of revisions in the desired order. Within the
2104 2104 window, we first walk forwards to gather data, then in the desired
2105 2105 order (usually backwards) to display it.
2106 2106
2107 2107 This function returns an iterator yielding contexts. Before
2108 2108 yielding each context, the iterator will first call the prepare
2109 2109 function on each context in the window in forward order.'''
2110 2110
2111 2111 follow = opts.get('follow') or opts.get('follow_first')
2112 2112 revs = _logrevs(repo, opts)
2113 2113 if not revs:
2114 2114 return []
2115 2115 wanted = set()
2116 2116 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2117 2117 opts.get('removed'))
2118 2118 fncache = {}
2119 2119 change = repo.changectx
2120 2120
2121 2121 # First step is to fill wanted, the set of revisions that we want to yield.
2122 2122 # When it does not induce extra cost, we also fill fncache for revisions in
2123 2123 # wanted: a cache of filenames that were changed (ctx.files()) and that
2124 2124 # match the file filtering conditions.
2125 2125
2126 2126 if match.always():
2127 2127 # No files, no patterns. Display all revs.
2128 2128 wanted = revs
2129 2129 elif not slowpath:
2130 2130 # We only have to read through the filelog to find wanted revisions
2131 2131
2132 2132 try:
2133 2133 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2134 2134 except FileWalkError:
2135 2135 slowpath = True
2136 2136
2137 2137 # We decided to fall back to the slowpath because at least one
2138 2138 # of the paths was not a file. Check to see if at least one of them
2139 2139 # existed in history, otherwise simply return
2140 2140 for path in match.files():
2141 2141 if path == '.' or path in repo.store:
2142 2142 break
2143 2143 else:
2144 2144 return []
2145 2145
2146 2146 if slowpath:
2147 2147 # We have to read the changelog to match filenames against
2148 2148 # changed files
2149 2149
2150 2150 if follow:
2151 2151 raise error.Abort(_('can only follow copies/renames for explicit '
2152 2152 'filenames'))
2153 2153
2154 2154 # The slow path checks files modified in every changeset.
2155 2155 # This is really slow on large repos, so compute the set lazily.
2156 2156 class lazywantedset(object):
2157 2157 def __init__(self):
2158 2158 self.set = set()
2159 2159 self.revs = set(revs)
2160 2160
2161 2161 # No need to worry about locality here because it will be accessed
2162 2162 # in the same order as the increasing window below.
2163 2163 def __contains__(self, value):
2164 2164 if value in self.set:
2165 2165 return True
2166 2166 elif value not in self.revs:
2167 2167 return False
2168 2168 else:
2169 2169 self.revs.discard(value)
2170 2170 ctx = change(value)
2171 2171 matches = filter(match, ctx.files())
2172 2172 if matches:
2173 2173 fncache[value] = matches
2174 2174 self.set.add(value)
2175 2175 return True
2176 2176 return False
2177 2177
2178 2178 def discard(self, value):
2179 2179 self.revs.discard(value)
2180 2180 self.set.discard(value)
2181 2181
2182 2182 wanted = lazywantedset()
2183 2183
2184 2184 # it might be worthwhile to do this in the iterator if the rev range
2185 2185 # is descending and the prune args are all within that range
2186 2186 for rev in opts.get('prune', ()):
2187 2187 rev = repo[rev].rev()
2188 2188 ff = _followfilter(repo)
2189 2189 stop = min(revs[0], revs[-1])
2190 2190 for x in xrange(rev, stop - 1, -1):
2191 2191 if ff.match(x):
2192 2192 wanted = wanted - [x]
2193 2193
2194 2194 # Now that wanted is correctly initialized, we can iterate over the
2195 2195 # revision range, yielding only revisions in wanted.
2196 2196 def iterate():
2197 2197 if follow and match.always():
2198 2198 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
2199 2199 def want(rev):
2200 2200 return ff.match(rev) and rev in wanted
2201 2201 else:
2202 2202 def want(rev):
2203 2203 return rev in wanted
2204 2204
2205 2205 it = iter(revs)
2206 2206 stopiteration = False
2207 2207 for windowsize in increasingwindows():
2208 2208 nrevs = []
2209 2209 for i in xrange(windowsize):
2210 2210 rev = next(it, None)
2211 2211 if rev is None:
2212 2212 stopiteration = True
2213 2213 break
2214 2214 elif want(rev):
2215 2215 nrevs.append(rev)
2216 2216 for rev in sorted(nrevs):
2217 2217 fns = fncache.get(rev)
2218 2218 ctx = change(rev)
2219 2219 if not fns:
2220 2220 def fns_generator():
2221 2221 for f in ctx.files():
2222 2222 if match(f):
2223 2223 yield f
2224 2224 fns = fns_generator()
2225 2225 prepare(ctx, fns)
2226 2226 for rev in nrevs:
2227 2227 yield change(rev)
2228 2228
2229 2229 if stopiteration:
2230 2230 break
2231 2231
2232 2232 return iterate()
2233 2233
2234 2234 def _makefollowlogfilematcher(repo, files, followfirst):
2235 2235 # When displaying a revision with --patch --follow FILE, we have
2236 2236 # to know which file of the revision must be diffed. With
2237 2237 # --follow, we want the names of the ancestors of FILE in the
2238 2238 # revision, stored in "fcache". "fcache" is populated by
2239 2239 # reproducing the graph traversal already done by --follow revset
2240 2240 # and relating revs to file names (which is not "correct" but
2241 2241 # good enough).
2242 2242 fcache = {}
2243 2243 fcacheready = [False]
2244 2244 pctx = repo['.']
2245 2245
2246 2246 def populate():
2247 2247 for fn in files:
2248 2248 fctx = pctx[fn]
2249 2249 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
2250 2250 for c in fctx.ancestors(followfirst=followfirst):
2251 2251 fcache.setdefault(c.rev(), set()).add(c.path())
2252 2252
2253 2253 def filematcher(rev):
2254 2254 if not fcacheready[0]:
2255 2255 # Lazy initialization
2256 2256 fcacheready[0] = True
2257 2257 populate()
2258 2258 return scmutil.matchfiles(repo, fcache.get(rev, []))
2259 2259
2260 2260 return filematcher
2261 2261
2262 2262 def _makenofollowlogfilematcher(repo, pats, opts):
2263 2263 '''hook for extensions to override the filematcher for non-follow cases'''
2264 2264 return None
2265 2265
2266 2266 def _makelogrevset(repo, pats, opts, revs):
2267 2267 """Return (expr, filematcher) where expr is a revset string built
2268 2268 from log options and file patterns or None. If --stat or --patch
2269 2269 are not passed, filematcher is None. Otherwise it is a callable
2270 2270 taking a revision number and returning a match object filtering
2271 2271 the files to be detailed when displaying the revision.
2272 2272 """
2273 2273 opt2revset = {
2274 2274 'no_merges': ('not merge()', None),
2275 2275 'only_merges': ('merge()', None),
2276 2276 '_ancestors': ('ancestors(%(val)s)', None),
2277 2277 '_fancestors': ('_firstancestors(%(val)s)', None),
2278 2278 '_descendants': ('descendants(%(val)s)', None),
2279 2279 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2280 2280 '_matchfiles': ('_matchfiles(%(val)s)', None),
2281 2281 'date': ('date(%(val)r)', None),
2282 2282 'branch': ('branch(%(val)r)', ' or '),
2283 2283 '_patslog': ('filelog(%(val)r)', ' or '),
2284 2284 '_patsfollow': ('follow(%(val)r)', ' or '),
2285 2285 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2286 2286 'keyword': ('keyword(%(val)r)', ' or '),
2287 2287 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2288 2288 'user': ('user(%(val)r)', ' or '),
2289 2289 }
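# For example (illustrative): with opts = {'user': ['alice', 'bob'],
# 'no_merges': True}, the loop at the end of this function combines the
# table entries above into the revset
#
#     (not merge() and (user('alice') or user('bob')))
#
# where the ' or ' join string came from the 'user' entry.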
2290 2290
2291 2291 opts = dict(opts)
2292 2292 # follow or not follow?
2293 2293 follow = opts.get('follow') or opts.get('follow_first')
2294 2294 if opts.get('follow_first'):
2295 2295 followfirst = 1
2296 2296 else:
2297 2297 followfirst = 0
2298 2298 # --follow with FILE behavior depends on revs...
2299 2299 it = iter(revs)
2300 2300 startrev = next(it)
2301 2301 followdescendants = startrev < next(it, startrev)
2302 2302
2303 2303 # branch and only_branch are really aliases and must be handled at
2304 2304 # the same time
2305 2305 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2306 2306 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2307 2307 # pats/include/exclude are passed to match.match() directly in
2308 2308 # _matchfiles() revset but walkchangerevs() builds its matcher with
2309 2309 # scmutil.match(). The difference is that input pats are globbed on
2310 2310 # platforms without shell expansion (windows).
2311 2311 wctx = repo[None]
2312 2312 match, pats = scmutil.matchandpats(wctx, pats, opts)
2313 2313 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2314 2314 opts.get('removed'))
2315 2315 if not slowpath:
2316 2316 for f in match.files():
2317 2317 if follow and f not in wctx:
2318 2318 # If the file exists, it may be a directory, so let it
2319 2319 # take the slow path.
2320 2320 if os.path.exists(repo.wjoin(f)):
2321 2321 slowpath = True
2322 2322 continue
2323 2323 else:
2324 2324 raise error.Abort(_('cannot follow file not in parent '
2325 2325 'revision: "%s"') % f)
2326 2326 filelog = repo.file(f)
2327 2327 if not filelog:
2328 2328 # A zero count may be a directory or deleted file, so
2329 2329 # try to find matching entries on the slow path.
2330 2330 if follow:
2331 2331 raise error.Abort(
2332 2332 _('cannot follow nonexistent file: "%s"') % f)
2333 2333 slowpath = True
2334 2334
2335 2335 # We decided to fall back to the slowpath because at least one
2336 2336 # of the paths was not a file. Check to see if at least one of them
2337 2337 # existed in history - in that case, we'll continue down the
2338 2338 # slowpath; otherwise, we can turn off the slowpath
2339 2339 if slowpath:
2340 2340 for path in match.files():
2341 2341 if path == '.' or path in repo.store:
2342 2342 break
2343 2343 else:
2344 2344 slowpath = False
2345 2345
2346 2346 fpats = ('_patsfollow', '_patsfollowfirst')
2347 2347 fnopats = (('_ancestors', '_fancestors'),
2348 2348 ('_descendants', '_fdescendants'))
2349 2349 if slowpath:
2350 2350 # See walkchangerevs() slow path.
2351 2351 #
2352 2352 # pats/include/exclude cannot be represented as separate
2353 2353 # revset expressions as their filtering logic applies at file
2354 2354 # level. For instance "-I a -X a" matches a revision touching
2355 2355 # "a" and "b" while "file(a) and not file(b)" does
2356 2356 # not. Besides, filesets are evaluated against the working
2357 2357 # directory.
2358 2358 matchargs = ['r:', 'd:relpath']
2359 2359 for p in pats:
2360 2360 matchargs.append('p:' + p)
2361 2361 for p in opts.get('include', []):
2362 2362 matchargs.append('i:' + p)
2363 2363 for p in opts.get('exclude', []):
2364 2364 matchargs.append('x:' + p)
2365 2365 matchargs = ','.join(('%r' % p) for p in matchargs)
2366 2366 opts['_matchfiles'] = matchargs
2367 2367 if follow:
2368 2368 opts[fnopats[0][followfirst]] = '.'
2369 2369 else:
2370 2370 if follow:
2371 2371 if pats:
2372 2372 # follow() revset interprets its file argument as a
2373 2373 # manifest entry, so use match.files(), not pats.
2374 2374 opts[fpats[followfirst]] = list(match.files())
2375 2375 else:
2376 2376 op = fnopats[followdescendants][followfirst]
2377 2377 opts[op] = 'rev(%d)' % startrev
2378 2378 else:
2379 2379 opts['_patslog'] = list(pats)
2380 2380
2381 2381 filematcher = None
2382 2382 if opts.get('patch') or opts.get('stat'):
2383 2383 # When following files, track renames via a special matcher.
2384 2384 # If we're forced to take the slowpath it means we're following
2385 2385 # at least one pattern/directory, so don't bother with rename tracking.
2386 2386 if follow and not match.always() and not slowpath:
2387 2387 # _makefollowlogfilematcher expects its files argument to be
2388 2388 # relative to the repo root, so use match.files(), not pats.
2389 2389 filematcher = _makefollowlogfilematcher(repo, match.files(),
2390 2390 followfirst)
2391 2391 else:
2392 2392 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2393 2393 if filematcher is None:
2394 2394 filematcher = lambda rev: match
2395 2395
2396 2396 expr = []
2397 2397 for op, val in sorted(opts.iteritems()):
2398 2398 if not val:
2399 2399 continue
2400 2400 if op not in opt2revset:
2401 2401 continue
2402 2402 revop, andor = opt2revset[op]
2403 2403 if '%(val)' not in revop:
2404 2404 expr.append(revop)
2405 2405 else:
2406 2406 if not isinstance(val, list):
2407 2407 e = revop % {'val': val}
2408 2408 else:
2409 2409 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2410 2410 expr.append(e)
2411 2411
2412 2412 if expr:
2413 2413 expr = '(' + ' and '.join(expr) + ')'
2414 2414 else:
2415 2415 expr = None
2416 2416 return expr, filematcher
2417 2417
2418 2418 def _logrevs(repo, opts):
2419 2419 # Default --rev value depends on --follow but --follow behavior
2420 2420 # depends on revisions resolved from --rev...
2421 2421 follow = opts.get('follow') or opts.get('follow_first')
2422 2422 if opts.get('rev'):
2423 2423 revs = scmutil.revrange(repo, opts['rev'])
2424 2424 elif follow and repo.dirstate.p1() == nullid:
2425 2425 revs = smartset.baseset()
2426 2426 elif follow:
2427 2427 revs = repo.revs('reverse(:.)')
2428 2428 else:
2429 2429 revs = smartset.spanset(repo)
2430 2430 revs.reverse()
2431 2431 return revs
2432 2432
2433 2433 def getgraphlogrevs(repo, pats, opts):
2434 2434 """Return (revs, expr, filematcher) where revs is an iterable of
2435 2435 revision numbers, expr is a revset string built from log options
2436 2436 and file patterns or None, and used to filter 'revs'. If --stat or
2437 2437 --patch are not passed filematcher is None. Otherwise it is a
2438 2438 callable taking a revision number and returning a match objects
2439 2439 filtering the files to be detailed when displaying the revision.
2440 2440 """
2441 2441 limit = loglimit(opts)
2442 2442 revs = _logrevs(repo, opts)
2443 2443 if not revs:
2444 2444 return smartset.baseset(), None, None
2445 2445 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2446 2446 if opts.get('rev'):
2447 2447 # User-specified revs might be unsorted, but don't sort before
2448 2448 # _makelogrevset because it might depend on the order of revs
2449 2449 if not (revs.isdescending() or revs.istopo()):
2450 2450 revs.sort(reverse=True)
2451 2451 if expr:
2452 2452 matcher = revset.match(repo.ui, expr, order=revset.followorder)
2453 2453 revs = matcher(repo, revs)
2454 2454 if limit is not None:
2455 2455 limitedrevs = []
2456 2456 for idx, rev in enumerate(revs):
2457 2457 if idx >= limit:
2458 2458 break
2459 2459 limitedrevs.append(rev)
2460 2460 revs = smartset.baseset(limitedrevs)
2461 2461
2462 2462 return revs, expr, filematcher
2463 2463
2464 2464 def getlogrevs(repo, pats, opts):
2465 2465 """Return (revs, expr, filematcher) where revs is an iterable of
2466 2466 revision numbers, expr is a revset string built from log options
2467 2467 and file patterns or None, and used to filter 'revs'. If --stat or
2468 2468 --patch are not passed filematcher is None. Otherwise it is a
2469 2469 callable taking a revision number and returning a match objects
2470 2470 filtering the files to be detailed when displaying the revision.
2471 2471 """
2472 2472 limit = loglimit(opts)
2473 2473 revs = _logrevs(repo, opts)
2474 2474 if not revs:
2475 2475 return smartset.baseset([]), None, None
2476 2476 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2477 2477 if expr:
2478 2478 matcher = revset.match(repo.ui, expr, order=revset.followorder)
2479 2479 revs = matcher(repo, revs)
2480 2480 if limit is not None:
2481 2481 limitedrevs = []
2482 2482 for idx, r in enumerate(revs):
2483 2483 if limit <= idx:
2484 2484 break
2485 2485 limitedrevs.append(r)
2486 2486 revs = smartset.baseset(limitedrevs)
2487 2487
2488 2488 return revs, expr, filematcher
2489 2489
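# A minimal usage sketch (assuming 'repo', 'pats', 'opts' and a 'displayer'
# are already in hand), mirroring how the log command consumes this helper:
#
#   revs, expr, filematcher = getlogrevs(repo, pats, opts)
#   for rev in revs:
#       matchfn = filematcher(rev) if filematcher else None
#       displayer.show(repo[rev], matchfn=matchfn)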
2490 2490 def _graphnodeformatter(ui, displayer):
2491 2491 spec = ui.config('ui', 'graphnodetemplate')
2492 2492 if not spec:
2493 2493 return templatekw.showgraphnode # fast path for "{graphnode}"
2494 2494
2495 2495 spec = templater.unquotestring(spec)
2496 2496 templ = formatter.maketemplater(ui, spec)
2497 2497 cache = {}
2498 2498 if isinstance(displayer, changeset_templater):
2499 2499 cache = displayer.cache # reuse cache of slow templates
2500 2500 props = templatekw.keywords.copy()
2501 2501 props['templ'] = templ
2502 2502 props['cache'] = cache
2503 2503 def formatnode(repo, ctx):
2504 2504 props['ctx'] = ctx
2505 2505 props['repo'] = repo
2506 2506 props['ui'] = repo.ui
2507 2507 props['revcache'] = {}
2508 2508 return templ.render(props)
2509 2509 return formatnode
2510 2510
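# For instance, a user could replace the default 'o' glyph via hgrc
# (illustrative template only; any changeset template keyword works here):
#
#   [ui]
#   graphnodetemplate = {if(bookmarks, "*", "o")}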
2511 2511 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2512 2512 filematcher=None):
2513 2513 formatnode = _graphnodeformatter(ui, displayer)
2514 2514 state = graphmod.asciistate()
2515 2515 styles = state['styles']
2516 2516
2517 2517 # only set graph styling if HGPLAIN is not set.
2518 2518 if ui.plain('graph'):
2519 2519 # set all edge styles to |, the default pre-3.8 behaviour
2520 2520 styles.update(dict.fromkeys(styles, '|'))
2521 2521 else:
2522 2522 edgetypes = {
2523 2523 'parent': graphmod.PARENT,
2524 2524 'grandparent': graphmod.GRANDPARENT,
2525 2525 'missing': graphmod.MISSINGPARENT
2526 2526 }
2527 2527 for name, key in edgetypes.items():
2528 2528 # experimental config: experimental.graphstyle.*
2529 2529 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2530 2530 styles[key])
2531 2531 if not styles[key]:
2532 2532 styles[key] = None
2533 2533
2534 2534 # experimental config: experimental.graphshorten
2535 2535 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
2536 2536
2537 2537 for rev, type, ctx, parents in dag:
2538 2538 char = formatnode(repo, ctx)
2539 2539 copies = None
2540 2540 if getrenamed and ctx.rev():
2541 2541 copies = []
2542 2542 for fn in ctx.files():
2543 2543 rename = getrenamed(fn, ctx.rev())
2544 2544 if rename:
2545 2545 copies.append((fn, rename[0]))
2546 2546 revmatchfn = None
2547 2547 if filematcher is not None:
2548 2548 revmatchfn = filematcher(ctx.rev())
2549 2549 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2550 2550 lines = displayer.hunk.pop(rev).split('\n')
2551 2551 if not lines[-1]:
2552 2552 del lines[-1]
2553 2553 displayer.flush(ctx)
2554 2554 edges = edgefn(type, char, lines, state, rev, parents)
2555 2555 for type, char, lines, coldata in edges:
2556 2556 graphmod.ascii(ui, state, type, char, lines, coldata)
2557 2557 displayer.close()
2558 2558
2559 2559 def graphlog(ui, repo, pats, opts):
2560 2560 # Parameters are identical to log command ones
2561 2561 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2562 2562 revdag = graphmod.dagwalker(repo, revs)
2563 2563
2564 2564 getrenamed = None
2565 2565 if opts.get('copies'):
2566 2566 endrev = None
2567 2567 if opts.get('rev'):
2568 2568 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2569 2569 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2570 2570
2571 2571 ui.pager('log')
2572 2572 displayer = show_changeset(ui, repo, opts, buffered=True)
2573 2573 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2574 2574 filematcher)
2575 2575
2576 2576 def checkunsupportedgraphflags(pats, opts):
2577 2577 for op in ["newest_first"]:
2578 2578 if op in opts and opts[op]:
2579 2579 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2580 2580 % op.replace("_", "-"))
2581 2581
2582 2582 def graphrevs(repo, nodes, opts):
2583 2583 limit = loglimit(opts)
2584 2584 nodes.reverse()
2585 2585 if limit is not None:
2586 2586 nodes = nodes[:limit]
2587 2587 return graphmod.nodes(repo, nodes)
2588 2588
2589 2589 def add(ui, repo, match, prefix, explicitonly, **opts):
2590 2590 join = lambda f: os.path.join(prefix, f)
2591 2591 bad = []
2592 2592
2593 2593 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2594 2594 names = []
2595 2595 wctx = repo[None]
2596 2596 cca = None
2597 2597 abort, warn = scmutil.checkportabilityalert(ui)
2598 2598 if abort or warn:
2599 2599 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2600 2600
2601 2601 badmatch = matchmod.badmatch(match, badfn)
2602 2602 dirstate = repo.dirstate
2603 2603 # We don't want to just call wctx.walk here, since it would return a lot of
2604 2604 # clean files, which we aren't interested in, and doing so takes time.
2605 2605 for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
2606 2606 True, False, full=False)):
2607 2607 exact = match.exact(f)
2608 2608 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2609 2609 if cca:
2610 2610 cca(f)
2611 2611 names.append(f)
2612 2612 if ui.verbose or not exact:
2613 2613 ui.status(_('adding %s\n') % match.rel(f))
2614 2614
2615 2615 for subpath in sorted(wctx.substate):
2616 2616 sub = wctx.sub(subpath)
2617 2617 try:
2618 2618 submatch = matchmod.subdirmatcher(subpath, match)
2619 2619 if opts.get(r'subrepos'):
2620 2620 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2621 2621 else:
2622 2622 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2623 2623 except error.LookupError:
2624 2624 ui.status(_("skipping missing subrepository: %s\n")
2625 2625 % join(subpath))
2626 2626
2627 2627 if not opts.get(r'dry_run'):
2628 2628 rejected = wctx.add(names, prefix)
2629 2629 bad.extend(f for f in rejected if f in match.files())
2630 2630 return bad
2631 2631
2632 2632 def addwebdirpath(repo, serverpath, webconf):
2633 2633 webconf[serverpath] = repo.root
2634 2634 repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
2635 2635
2636 2636 for r in repo.revs('filelog("path:.hgsub")'):
2637 2637 ctx = repo[r]
2638 2638 for subpath in ctx.substate:
2639 2639 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2640 2640
2641 2641 def forget(ui, repo, match, prefix, explicitonly):
2642 2642 join = lambda f: os.path.join(prefix, f)
2643 2643 bad = []
2644 2644 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2645 2645 wctx = repo[None]
2646 2646 forgot = []
2647 2647
2648 2648 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2649 2649 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2650 2650 if explicitonly:
2651 2651 forget = [f for f in forget if match.exact(f)]
2652 2652
2653 2653 for subpath in sorted(wctx.substate):
2654 2654 sub = wctx.sub(subpath)
2655 2655 try:
2656 2656 submatch = matchmod.subdirmatcher(subpath, match)
2657 2657 subbad, subforgot = sub.forget(submatch, prefix)
2658 2658 bad.extend([subpath + '/' + f for f in subbad])
2659 2659 forgot.extend([subpath + '/' + f for f in subforgot])
2660 2660 except error.LookupError:
2661 2661 ui.status(_("skipping missing subrepository: %s\n")
2662 2662 % join(subpath))
2663 2663
2664 2664 if not explicitonly:
2665 2665 for f in match.files():
2666 2666 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2667 2667 if f not in forgot:
2668 2668 if repo.wvfs.exists(f):
2669 2669 # Don't complain if the exact case match wasn't given.
2670 2670 # But don't do this until after checking 'forgot', so
2671 2671 # that subrepo files aren't normalized, and this op is
2672 2672 # purely from data cached by the status walk above.
2673 2673 if repo.dirstate.normalize(f) in repo.dirstate:
2674 2674 continue
2675 2675 ui.warn(_('not removing %s: '
2676 2676 'file is already untracked\n')
2677 2677 % match.rel(f))
2678 2678 bad.append(f)
2679 2679
2680 2680 for f in forget:
2681 2681 if ui.verbose or not match.exact(f):
2682 2682 ui.status(_('removing %s\n') % match.rel(f))
2683 2683
2684 2684 rejected = wctx.forget(forget, prefix)
2685 2685 bad.extend(f for f in rejected if f in match.files())
2686 2686 forgot.extend(f for f in forget if f not in rejected)
2687 2687 return bad, forgot
2688 2688
2689 2689 def files(ui, ctx, m, fm, fmt, subrepos):
2690 2690 rev = ctx.rev()
2691 2691 ret = 1
2692 2692 ds = ctx.repo().dirstate
2693 2693
2694 2694 for f in ctx.matches(m):
2695 2695 if rev is None and ds[f] == 'r':
2696 2696 continue
2697 2697 fm.startitem()
2698 2698 if ui.verbose:
2699 2699 fc = ctx[f]
2700 2700 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2701 2701 fm.data(abspath=f)
2702 2702 fm.write('path', fmt, m.rel(f))
2703 2703 ret = 0
2704 2704
2705 2705 for subpath in sorted(ctx.substate):
2706 2706 submatch = matchmod.subdirmatcher(subpath, m)
2707 2707 if (subrepos or m.exact(subpath) or any(submatch.files())):
2708 2708 sub = ctx.sub(subpath)
2709 2709 try:
2710 2710 recurse = m.exact(subpath) or subrepos
2711 2711 if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
2712 2712 ret = 0
2713 2713 except error.LookupError:
2714 2714 ui.status(_("skipping missing subrepository: %s\n")
2715 2715 % m.abs(subpath))
2716 2716
2717 2717 return ret
2718 2718
2719 2719 def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
2720 2720 join = lambda f: os.path.join(prefix, f)
2721 2721 ret = 0
2722 2722 s = repo.status(match=m, clean=True)
2723 2723 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2724 2724
2725 2725 wctx = repo[None]
2726 2726
2727 2727 if warnings is None:
2728 2728 warnings = []
2729 2729 warn = True
2730 2730 else:
2731 2731 warn = False
2732 2732
2733 2733 subs = sorted(wctx.substate)
2734 2734 total = len(subs)
2735 2735 count = 0
2736 2736 for subpath in subs:
2737 2737 count += 1
2738 2738 submatch = matchmod.subdirmatcher(subpath, m)
2739 2739 if subrepos or m.exact(subpath) or any(submatch.files()):
2740 2740 ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
2741 2741 sub = wctx.sub(subpath)
2742 2742 try:
2743 2743 if sub.removefiles(submatch, prefix, after, force, subrepos,
2744 2744 warnings):
2745 2745 ret = 1
2746 2746 except error.LookupError:
2747 2747 warnings.append(_("skipping missing subrepository: %s\n")
2748 2748 % join(subpath))
2749 2749 ui.progress(_('searching'), None)
2750 2750
2751 2751 # warn about failure to delete explicit files/dirs
2752 2752 deleteddirs = util.dirs(deleted)
2753 2753 files = m.files()
2754 2754 total = len(files)
2755 2755 count = 0
2756 2756 for f in files:
2757 2757 def insubrepo():
2758 2758 for subpath in wctx.substate:
2759 2759 if f.startswith(subpath + '/'):
2760 2760 return True
2761 2761 return False
2762 2762
2763 2763 count += 1
2764 2764 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2765 2765 isdir = f in deleteddirs or wctx.hasdir(f)
2766 2766 if (f in repo.dirstate or isdir or f == '.'
2767 2767 or insubrepo() or f in subs):
2768 2768 continue
2769 2769
2770 2770 if repo.wvfs.exists(f):
2771 2771 if repo.wvfs.isdir(f):
2772 2772 warnings.append(_('not removing %s: no tracked files\n')
2773 2773 % m.rel(f))
2774 2774 else:
2775 2775 warnings.append(_('not removing %s: file is untracked\n')
2776 2776 % m.rel(f))
2777 2777 # missing files will generate a warning elsewhere
2778 2778 ret = 1
2779 2779 ui.progress(_('deleting'), None)
2780 2780
2781 2781 if force:
2782 2782 list = modified + deleted + clean + added
2783 2783 elif after:
2784 2784 list = deleted
2785 2785 remaining = modified + added + clean
2786 2786 total = len(remaining)
2787 2787 count = 0
2788 2788 for f in remaining:
2789 2789 count += 1
2790 2790 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2791 2791 warnings.append(_('not removing %s: file still exists\n')
2792 2792 % m.rel(f))
2793 2793 ret = 1
2794 2794 ui.progress(_('skipping'), None)
2795 2795 else:
2796 2796 list = deleted + clean
2797 2797 total = len(modified) + len(added)
2798 2798 count = 0
2799 2799 for f in modified:
2800 2800 count += 1
2801 2801 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2802 2802 warnings.append(_('not removing %s: file is modified (use -f'
2803 2803 ' to force removal)\n') % m.rel(f))
2804 2804 ret = 1
2805 2805 for f in added:
2806 2806 count += 1
2807 2807 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2808 2808 warnings.append(_("not removing %s: file has been marked for add"
2809 2809 " (use 'hg forget' to undo add)\n") % m.rel(f))
2810 2810 ret = 1
2811 2811 ui.progress(_('skipping'), None)
2812 2812
2813 2813 list = sorted(list)
2814 2814 total = len(list)
2815 2815 count = 0
2816 2816 for f in list:
2817 2817 count += 1
2818 2818 if ui.verbose or not m.exact(f):
2819 2819 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2820 2820 ui.status(_('removing %s\n') % m.rel(f))
2821 2821 ui.progress(_('deleting'), None)
2822 2822
2823 2823 with repo.wlock():
2824 2824 if not after:
2825 2825 for f in list:
2826 2826 if f in added:
2827 2827 continue # we never unlink added files on remove
2828 2828 repo.wvfs.unlinkpath(f, ignoremissing=True)
2829 2829 repo[None].forget(list)
2830 2830
2831 2831 if warn:
2832 2832 for warning in warnings:
2833 2833 ui.warn(warning)
2834 2834
2835 2835 return ret
2836 2836
2837 2837 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2838 2838 err = 1
2839 2839
2840 2840 def write(path):
2841 2841 filename = None
2842 2842 if fntemplate:
2843 2843 filename = makefilename(repo, fntemplate, ctx.node(),
2844 2844 pathname=os.path.join(prefix, path))
2845 2845 with formatter.maybereopen(basefm, filename, opts) as fm:
2846 2846 data = ctx[path].data()
2847 2847 if opts.get('decode'):
2848 2848 data = repo.wwritedata(path, data)
2849 2849 fm.startitem()
2850 2850 fm.write('data', '%s', data)
2851 2851 fm.data(abspath=path, path=matcher.rel(path))
2852 2852
2853 2853 # Automation often uses hg cat on single files, so special-case it
2854 2854 # for performance to avoid the cost of parsing the manifest.
2855 2855 if len(matcher.files()) == 1 and not matcher.anypats():
2856 2856 file = matcher.files()[0]
2857 2857 mfl = repo.manifestlog
2858 2858 mfnode = ctx.manifestnode()
2859 2859 try:
2860 2860 if mfnode and mfl[mfnode].find(file)[0]:
2861 2861 write(file)
2862 2862 return 0
2863 2863 except KeyError:
2864 2864 pass
2865 2865
2866 2866 for abs in ctx.walk(matcher):
2867 2867 write(abs)
2868 2868 err = 0
2869 2869
2870 2870 for subpath in sorted(ctx.substate):
2871 2871 sub = ctx.sub(subpath)
2872 2872 try:
2873 2873 submatch = matchmod.subdirmatcher(subpath, matcher)
2874 2874
2875 2875 if not sub.cat(submatch, basefm, fntemplate,
2876 2876 os.path.join(prefix, sub._path), **opts):
2877 2877 err = 0
2878 2878 except error.RepoLookupError:
2879 2879 ui.status(_("skipping missing subrepository: %s\n")
2880 2880 % os.path.join(prefix, subpath))
2881 2881
2882 2882 return err
2883 2883
2884 2884 def commit(ui, repo, commitfunc, pats, opts):
2885 2885 '''commit the specified files or all outstanding changes'''
2886 2886 date = opts.get('date')
2887 2887 if date:
2888 2888 opts['date'] = util.parsedate(date)
2889 2889 message = logmessage(ui, opts)
2890 2890 matcher = scmutil.match(repo[None], pats, opts)
2891 2891
2892 2892 dsguard = None
2893 2893 # extract addremove carefully -- this function can be called from a command
2894 2894 # that doesn't support addremove
2895 2895 try:
2896 2896 if opts.get('addremove'):
2897 2897 dsguard = dirstateguard.dirstateguard(repo, 'commit')
2898 2898 if scmutil.addremove(repo, matcher, "", opts) != 0:
2899 2899 raise error.Abort(
2900 2900 _("failed to mark all new/missing files as added/removed"))
2901 2901
2902 2902 r = commitfunc(ui, repo, message, matcher, opts)
2903 2903 if dsguard:
2904 2904 dsguard.close()
2905 2905 return r
2906 2906 finally:
2907 2907 if dsguard:
2908 2908 dsguard.release()
2909 2909
2910 2910 def samefile(f, ctx1, ctx2):
2911 2911 if f in ctx1.manifest():
2912 2912 a = ctx1.filectx(f)
2913 2913 if f in ctx2.manifest():
2914 2914 b = ctx2.filectx(f)
2915 2915 return (not a.cmp(b)
2916 2916 and a.flags() == b.flags())
2917 2917 else:
2918 2918 return False
2919 2919 else:
2920 2920 return f not in ctx2.manifest()
2921 2921
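# In short, samefile() is True only when f has identical contents and flags
# in both contexts, or is absent from both manifests; amend() below uses it
# to prune files whose changes cancelled out.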
2922 2922 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2923 2923 # avoid cycle context -> subrepo -> cmdutil
2924 2924 from . import context
2925 2925
2926 2926 # amend will reuse the existing user if not specified, but the obsolete
2927 2927 # marker creation requires that the current user's name is specified.
2928 2928 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2929 2929 ui.username() # raise exception if username not set
2930 2930
2931 2931 ui.note(_('amending changeset %s\n') % old)
2932 2932 base = old.p1()
2933 2933
2934 2934 newid = None
2935 2935 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2936 2936 # See if we got a message from -m or -l, if not, open the editor
2937 2937 # with the message of the changeset to amend
2938 2938 message = logmessage(ui, opts)
2939 2939 # ensure logfile does not conflict with later enforcement of the
2940 2940 # message. potential logfile content has been processed by
2941 2941 # `logmessage` anyway.
2942 2942 opts.pop('logfile')
2943 2943 # First, do a regular commit to record all changes in the working
2944 2944 # directory (if there are any)
2945 2945 ui.callhooks = False
2946 2946 activebookmark = repo._bookmarks.active
2947 2947 try:
2948 2948 repo._bookmarks.active = None
2949 2949 opts['message'] = 'temporary amend commit for %s' % old
2950 2950 node = commit(ui, repo, commitfunc, pats, opts)
2951 2951 finally:
2952 2952 repo._bookmarks.active = activebookmark
2953 2953 ui.callhooks = True
2954 2954 ctx = repo[node]
2955 2955
2956 2956 # Participating changesets:
2957 2957 #
2958 2958 # node/ctx o - new (intermediate) commit that contains changes
2959 2959 # | from working dir to go into amending commit
2960 2960 # | (or a workingctx if there were no changes)
2961 2961 # |
2962 2962 # old o - changeset to amend
2963 2963 # |
2964 2964 # base o - parent of amending changeset
2965 2965
2966 2966 # Update extra dict from amended commit (e.g. to preserve graft
2967 2967 # source)
2968 2968 extra.update(old.extra())
2969 2969
2970 2970 # Also update it from the intermediate commit or from the wctx
2971 2971 extra.update(ctx.extra())
2972 2972
2973 2973 if len(old.parents()) > 1:
2974 2974 # ctx.files() isn't reliable for merges, so fall back to the
2975 2975 # slower repo.status() method
2976 2976 files = set([fn for st in repo.status(base, old)[:3]
2977 2977 for fn in st])
2978 2978 else:
2979 2979 files = set(old.files())
2980 2980
2981 2981 # Second, we use either the commit we just did, or if there were no
2982 2982 # changes the parent of the working directory as the version of the
2983 2983 # files in the final amend commit
2984 2984 if node:
2985 2985 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2986 2986
2987 2987 user = ctx.user()
2988 2988 date = ctx.date()
2989 2989 # Recompute copies (avoid recording a -> b -> a)
2990 2990 copied = copies.pathcopies(base, ctx)
# old.p2 is a bound method and therefore always truthy; call it and
# compare against the null changeset to detect a real second parent.
2991 2991 if old.p2().node() != nullid:
2992 2992 copied.update(copies.pathcopies(old.p2(), ctx))
2993 2993
2994 2994 # Prune files which were reverted by the updates: if old
2995 2995 # introduced file X and our intermediate commit, node,
2996 2996 # renamed that file, then those two files are the same and
2997 2997 # we can discard X from our list of files. Likewise if X
2998 2998 # was deleted, it's no longer relevant
2999 2999 files.update(ctx.files())
3000 3000 files = [f for f in files if not samefile(f, ctx, base)]
3001 3001
3002 3002 def filectxfn(repo, ctx_, path):
3003 3003 try:
3004 3004 fctx = ctx[path]
3005 3005 flags = fctx.flags()
3006 3006 mctx = context.memfilectx(repo,
3007 3007 fctx.path(), fctx.data(),
3008 3008 islink='l' in flags,
3009 3009 isexec='x' in flags,
3010 3010 copied=copied.get(path))
3011 3011 return mctx
3012 3012 except KeyError:
3013 3013 return None
3014 3014 else:
3015 3015 ui.note(_('copying changeset %s to %s\n') % (old, base))
3016 3016
3017 3017 # Use version of files as in the old cset
3018 3018 def filectxfn(repo, ctx_, path):
3019 3019 try:
3020 3020 return old.filectx(path)
3021 3021 except KeyError:
3022 3022 return None
3023 3023
3024 3024 user = opts.get('user') or old.user()
3025 3025 date = opts.get('date') or old.date()
3026 3026 editform = mergeeditform(old, 'commit.amend')
3027 3027 editor = getcommiteditor(editform=editform,
3028 3028 **pycompat.strkwargs(opts))
3029 3029 if not message:
3030 3030 editor = getcommiteditor(edit=True, editform=editform)
3031 3031 message = old.description()
3032 3032
3033 3033 pureextra = extra.copy()
3034 3034 extra['amend_source'] = old.hex()
3035 3035
3036 3036 new = context.memctx(repo,
3037 3037 parents=[base.node(), old.p2().node()],
3038 3038 text=message,
3039 3039 files=files,
3040 3040 filectxfn=filectxfn,
3041 3041 user=user,
3042 3042 date=date,
3043 3043 extra=extra,
3044 3044 editor=editor)
3045 3045
3046 3046 newdesc = changelog.stripdesc(new.description())
3047 3047 if ((not node)
3048 3048 and newdesc == old.description()
3049 3049 and user == old.user()
3050 3050 and date == old.date()
3051 3051 and pureextra == old.extra()):
3052 3052 # nothing changed. continuing here would create a new node
3053 3053 # anyway because of the amend_source noise.
3054 3054 #
3055 3055 # This is not what we expect from amend.
3056 3056 return old.node()
3057 3057
3058 3058 ph = repo.ui.config('phases', 'new-commit', phases.draft)
3059 3059 try:
3060 3060 if opts.get('secret'):
3061 3061 commitphase = 'secret'
3062 3062 else:
3063 3063 commitphase = old.phase()
3064 3064 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
3065 3065 newid = repo.commitctx(new)
3066 3066 finally:
3067 3067 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
3068 3068 if newid != old.node():
3069 3069 # Reroute the working copy parent to the new changeset
3070 3070 repo.setparents(newid, nullid)
3071 3071 mapping = {old.node(): (newid,)}
3072 3072 if node:
3073 3073 mapping[node] = ()
3074 3074 scmutil.cleanupnodes(repo, mapping, 'amend')
3075 3075 return newid
3076 3076
3077 3077 def commiteditor(repo, ctx, subs, editform=''):
3078 3078 if ctx.description():
3079 3079 return ctx.description()
3080 3080 return commitforceeditor(repo, ctx, subs, editform=editform,
3081 3081 unchangedmessagedetection=True)
3082 3082
3083 3083 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
3084 3084 editform='', unchangedmessagedetection=False):
3085 3085 if not extramsg:
3086 3086 extramsg = _("Leave message empty to abort commit.")
3087 3087
3088 3088 forms = [e for e in editform.split('.') if e]
3089 3089 forms.insert(0, 'changeset')
3090 3090 templatetext = None
3091 3091 while forms:
3092 3092 ref = '.'.join(forms)
3093 3093 if repo.ui.config('committemplate', ref):
3094 3094 templatetext = committext = buildcommittemplate(
3095 3095 repo, ctx, subs, extramsg, ref)
3096 3096 break
3097 3097 forms.pop()
3098 3098 else:
3099 3099 committext = buildcommittext(repo, ctx, subs, extramsg)
3100 3100
3101 3101 # run editor in the repository root
3102 3102 olddir = pycompat.getcwd()
3103 3103 os.chdir(repo.root)
3104 3104
3105 3105 # make in-memory changes visible to external process
3106 3106 tr = repo.currenttransaction()
3107 3107 repo.dirstate.write(tr)
3108 3108 pending = tr and tr.writepending() and repo.root
3109 3109
3110 3110 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
3111 3111 editform=editform, pending=pending,
3112 3112 repopath=repo.path)
3113 3113 text = editortext
3114 3114
3115 3115 # strip away anything below this special string (used for editors that want
3116 3116 # to display the diff)
3117 3117 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3118 3118 if stripbelow:
3119 3119 text = text[:stripbelow.start()]
3120 3120
3121 3121 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
3122 3122 os.chdir(olddir)
3123 3123
3124 3124 if finishdesc:
3125 3125 text = finishdesc(text)
3126 3126 if not text.strip():
3127 3127 raise error.Abort(_("empty commit message"))
3128 3128 if unchangedmessagedetection and editortext == templatetext:
3129 3129 raise error.Abort(_("commit message unchanged"))
3130 3130
3131 3131 return text
3132 3132
3133 3133 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3134 3134 ui = repo.ui
3135 3135 spec = formatter.templatespec(ref, None, None)
3136 3136 t = changeset_templater(ui, repo, spec, None, {}, False)
3137 3137 t.t.cache.update((k, templater.unquotestring(v))
3138 3138 for k, v in repo.ui.configitems('committemplate'))
3139 3139
3140 3140 if not extramsg:
3141 3141 extramsg = '' # ensure that extramsg is a string
3142 3142
3143 3143 ui.pushbuffer()
3144 3144 t.show(ctx, extramsg=extramsg)
3145 3145 return ui.popbuffer()
3146 3146
3147 3147 def hgprefix(msg):
3148 3148 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
3149 3149
3150 3150 def buildcommittext(repo, ctx, subs, extramsg):
3151 3151 edittext = []
3152 3152 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3153 3153 if ctx.description():
3154 3154 edittext.append(ctx.description())
3155 3155 edittext.append("")
3156 3156 edittext.append("") # Empty line between message and comments.
3157 3157 edittext.append(hgprefix(_("Enter commit message."
3158 3158 " Lines beginning with 'HG:' are removed.")))
3159 3159 edittext.append(hgprefix(extramsg))
3160 3160 edittext.append("HG: --")
3161 3161 edittext.append(hgprefix(_("user: %s") % ctx.user()))
3162 3162 if ctx.p2():
3163 3163 edittext.append(hgprefix(_("branch merge")))
3164 3164 if ctx.branch():
3165 3165 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
3166 3166 if bookmarks.isactivewdirparent(repo):
3167 3167 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
3168 3168 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
3169 3169 edittext.extend([hgprefix(_("added %s") % f) for f in added])
3170 3170 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
3171 3171 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
3172 3172 if not added and not modified and not removed:
3173 3173 edittext.append(hgprefix(_("no files changed")))
3174 3174 edittext.append("")
3175 3175
3176 3176 return "\n".join(edittext)
3177 3177
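# A typical skeleton produced here looks like (values illustrative):
#
#   HG: Enter commit message. Lines beginning with 'HG:' are removed.
#   HG: Leave message empty to abort commit.
#   HG: --
#   HG: user: Alice <alice@example.com>
#   HG: branch 'default'
#   HG: changed foo.py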
3178 3178 def commitstatus(repo, node, branch, bheads=None, opts=None):
3179 3179 if opts is None:
3180 3180 opts = {}
3181 3181 ctx = repo[node]
3182 3182 parents = ctx.parents()
3183 3183
3184 3184 if (not opts.get('amend') and bheads and node not in bheads and not
3185 3185 [x for x in parents if x.node() in bheads and x.branch() == branch]):
3186 3186 repo.ui.status(_('created new head\n'))
3187 3187 # The message is not printed for initial roots. For the other
3188 3188 # changesets, it is printed in the following situations:
3189 3189 #
3190 3190 # Par column: for the 2 parents with ...
3191 3191 # N: null or no parent
3192 3192 # B: parent is on another named branch
3193 3193 # C: parent is a regular non head changeset
3194 3194 # H: parent was a branch head of the current branch
3195 3195 # Msg column: whether we print "created new head" message
3196 3196 # In the following, it is assumed that there already exists some
3197 3197 # initial branch heads of the current branch, otherwise nothing is
3198 3198 # printed anyway.
3199 3199 #
3200 3200 # Par Msg Comment
3201 3201 # N N y additional topo root
3202 3202 #
3203 3203 # B N y additional branch root
3204 3204 # C N y additional topo head
3205 3205 # H N n usual case
3206 3206 #
3207 3207 # B B y weird additional branch root
3208 3208 # C B y branch merge
3209 3209 # H B n merge with named branch
3210 3210 #
3211 3211 # C C y additional head from merge
3212 3212 # C H n merge with a head
3213 3213 #
3214 3214 # H H n head merge: head count decreases
3215 3215
3216 3216 if not opts.get('close_branch'):
3217 3217 for r in parents:
3218 3218 if r.closesbranch() and r.branch() == branch:
3219 3219 repo.ui.status(_('reopening closed branch head %d\n') % r)
3220 3220
3221 3221 if repo.ui.debugflag:
3222 3222 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
3223 3223 elif repo.ui.verbose:
3224 3224 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3225 3225
3226 3226 def postcommitstatus(repo, pats, opts):
3227 3227 return repo.status(match=scmutil.match(repo[None], pats, opts))
3228 3228
3229 3229 def revert(ui, repo, ctx, parents, *pats, **opts):
3230 3230 parent, p2 = parents
3231 3231 node = ctx.node()
3232 3232
3233 3233 mf = ctx.manifest()
3234 3234 if node == p2:
3235 3235 parent = p2
3236 3236
3237 3237 # need all matching names in dirstate and manifest of target rev,
3238 3238 # so have to walk both. do not print errors if files exist in one
3239 3239 # but not the other. in both cases, filesets should be evaluated against
3240 3240 # workingctx to get consistent result (issue4497). this means 'set:**'
3241 3241 # cannot be used to select missing files from target rev.
3242 3242
3243 3243 # `names` is a mapping for all elements in working copy and target revision
3244 3244 # The mapping is in the form:
3245 3245 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3246 3246 names = {}
3247 3247
3248 3248 with repo.wlock():
3249 3249 ## filling of the `names` mapping
3250 3250 # walk dirstate to fill `names`
3251 3251
3252 3252 interactive = opts.get('interactive', False)
3253 3253 wctx = repo[None]
3254 3254 m = scmutil.match(wctx, pats, opts)
3255 3255
3256 3256 # we'll need this later
3257 3257 targetsubs = sorted(s for s in wctx.substate if m(s))
3258 3258
3259 3259 if not m.always():
3260 3260 matcher = matchmod.badmatch(m, lambda x, y: False)
3261 3261 for abs in wctx.walk(matcher):
3262 3262 names[abs] = m.rel(abs), m.exact(abs)
3263 3263
3264 3264 # walk target manifest to fill `names`
3265 3265
3266 3266 def badfn(path, msg):
3267 3267 if path in names:
3268 3268 return
3269 3269 if path in ctx.substate:
3270 3270 return
3271 3271 path_ = path + '/'
3272 3272 for f in names:
3273 3273 if f.startswith(path_):
3274 3274 return
3275 3275 ui.warn("%s: %s\n" % (m.rel(path), msg))
3276 3276
3277 3277 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3278 3278 if abs not in names:
3279 3279 names[abs] = m.rel(abs), m.exact(abs)
3280 3280
3281 3281 # Find the status of all files in `names`.
3282 3282 m = scmutil.matchfiles(repo, names)
3283 3283
3284 3284 changes = repo.status(node1=node, match=m,
3285 3285 unknown=True, ignored=True, clean=True)
3286 3286 else:
3287 3287 changes = repo.status(node1=node, match=m)
3288 3288 for kind in changes:
3289 3289 for abs in kind:
3290 3290 names[abs] = m.rel(abs), m.exact(abs)
3291 3291
3292 3292 m = scmutil.matchfiles(repo, names)
3293 3293
3294 3294 modified = set(changes.modified)
3295 3295 added = set(changes.added)
3296 3296 removed = set(changes.removed)
3297 3297 _deleted = set(changes.deleted)
3298 3298 unknown = set(changes.unknown)
3299 3299 unknown.update(changes.ignored)
3300 3300 clean = set(changes.clean)
3301 3301 modadded = set()
3302 3302
3303 3303 # We need to account for the state of the file in the dirstate,
3304 3304 # even when we revert against something other than the parent. This
3305 3305 # will slightly alter the behavior of revert (backing up or not,
3306 3306 # deleting or just forgetting, etc).
3307 3307 if parent == node:
3308 3308 dsmodified = modified
3309 3309 dsadded = added
3310 3310 dsremoved = removed
3311 3311 # store all local modifications, useful later for rename detection
3312 3312 localchanges = dsmodified | dsadded
3313 3313 modified, added, removed = set(), set(), set()
3314 3314 else:
3315 3315 changes = repo.status(node1=parent, match=m)
3316 3316 dsmodified = set(changes.modified)
3317 3317 dsadded = set(changes.added)
3318 3318 dsremoved = set(changes.removed)
3319 3319 # store all local modifications, useful later for rename detection
3320 3320 localchanges = dsmodified | dsadded
3321 3321
3322 3322 # only take into account removes between wc and target
3323 3323 clean |= dsremoved - removed
3324 3324 dsremoved &= removed
3325 3325 # distinguish between dirstate removes and the others
3326 3326 removed -= dsremoved
3327 3327
3328 3328 modadded = added & dsmodified
3329 3329 added -= modadded
3330 3330
3331 3331 # tell newly modified files apart.
3332 3332 dsmodified &= modified
3333 3333 dsmodified |= modified & dsadded # dirstate added may need backup
3334 3334 modified -= dsmodified
3335 3335
3336 3336 # We need to wait for some post-processing to update this set
3337 3337 # before making the distinction. The dirstate will be used for
3338 3338 # that purpose.
3339 3339 dsadded = added
3340 3340
3341 3341 # in case of merge, files that are actually added can be reported as
3342 3342 # modified, we need to post process the result
3343 3343 if p2 != nullid:
3344 3344 mergeadd = set(dsmodified)
3345 3345 for path in dsmodified:
3346 3346 if path in mf:
3347 3347 mergeadd.remove(path)
3348 3348 dsadded |= mergeadd
3349 3349 dsmodified -= mergeadd
3350 3350
3351 3351 # if f is a rename, update `names` to also revert the source
3352 3352 cwd = repo.getcwd()
3353 3353 for f in localchanges:
3354 3354 src = repo.dirstate.copied(f)
3355 3355 # XXX should we check for rename down to target node?
3356 3356 if src and src not in names and repo.dirstate[src] == 'r':
3357 3357 dsremoved.add(src)
3358 3358 names[src] = (repo.pathto(src, cwd), True)
3359 3359
3360 3360 # determine the exact nature of the deleted files
3361 3361 deladded = set(_deleted)
3362 3362 for path in _deleted:
3363 3363 if path in mf:
3364 3364 deladded.remove(path)
3365 3365 deleted = _deleted - deladded
3366 3366
3367 3367 # distinguish between files to forget and the others
3368 3368 added = set()
3369 3369 for abs in dsadded:
3370 3370 if repo.dirstate[abs] != 'a':
3371 3371 added.add(abs)
3372 3372 dsadded -= added
3373 3373
3374 3374 for abs in deladded:
3375 3375 if repo.dirstate[abs] == 'a':
3376 3376 dsadded.add(abs)
3377 3377 deladded -= dsadded
3378 3378
3379 3379 # For files marked as removed, we check if an unknown file is present at
3380 3380 # the same path. If such a file exists, it may need to be backed up.
3381 3381 # Making the distinction at this stage allows for simpler backup
3382 3382 # logic.
3383 3383 removunk = set()
3384 3384 for abs in removed:
3385 3385 target = repo.wjoin(abs)
3386 3386 if os.path.lexists(target):
3387 3387 removunk.add(abs)
3388 3388 removed -= removunk
3389 3389
3390 3390 dsremovunk = set()
3391 3391 for abs in dsremoved:
3392 3392 target = repo.wjoin(abs)
3393 3393 if os.path.lexists(target):
3394 3394 dsremovunk.add(abs)
3395 3395 dsremoved -= dsremovunk
3396 3396
3397 3397 # actions to be actually performed by revert
3398 3398 # (<list of files>, <message>) tuple
3399 3399 actions = {'revert': ([], _('reverting %s\n')),
3400 3400 'add': ([], _('adding %s\n')),
3401 3401 'remove': ([], _('removing %s\n')),
3402 3402 'drop': ([], _('removing %s\n')),
3403 3403 'forget': ([], _('forgetting %s\n')),
3404 3404 'undelete': ([], _('undeleting %s\n')),
3405 3405 'noop': (None, _('no changes needed to %s\n')),
3406 3406 'unknown': (None, _('file not managed: %s\n')),
3407 3407 }
3408 3408
3409 3409 # "constant" that convey the backup strategy.
3410 3410 # All set to `discard` if `no-backup` is set do avoid checking
3411 3411 # no_backup lower in the code.
3412 3412 # These values are ordered for comparison purposes
3413 3413 backupinteractive = 3 # do backup if interactively modified
3414 3414 backup = 2 # unconditionally do backup
3415 3415 check = 1 # check if the existing file differs from target
3416 3416 discard = 0 # never do backup
3417 3417 if opts.get('no_backup'):
3418 3418 backupinteractive = backup = check = discard
3419 3419 if interactive:
3420 3420 dsmodifiedbackup = backupinteractive
3421 3421 else:
3422 3422 dsmodifiedbackup = backup
3423 3423 tobackup = set()
3424 3424
3425 3425 backupanddel = actions['remove']
3426 3426 if not opts.get('no_backup'):
3427 3427 backupanddel = actions['drop']
3428 3428
3429 3429 disptable = (
3430 3430 # dispatch table:
3431 3431 # file state
3432 3432 # action
3433 3433 # make backup
3434 3434
3435 3435 ## Sets that result in files being changed on disk
3436 3436 # Modified compared to target, no local change
3437 3437 (modified, actions['revert'], discard),
3438 3438 # Modified compared to target, but local file is deleted
3439 3439 (deleted, actions['revert'], discard),
3440 3440 # Modified compared to target, local change
3441 3441 (dsmodified, actions['revert'], dsmodifiedbackup),
3442 3442 # Added since target
3443 3443 (added, actions['remove'], discard),
3444 3444 # Added in working directory
3445 3445 (dsadded, actions['forget'], discard),
3446 3446 # Added since target, have local modification
3447 3447 (modadded, backupanddel, backup),
3448 3448 # Added since target but file is missing in working directory
3449 3449 (deladded, actions['drop'], discard),
3450 3450 # Removed since target, before working copy parent
3451 3451 (removed, actions['add'], discard),
3452 3452 # Same as `removed` but an unknown file exists at the same path
3453 3453 (removunk, actions['add'], check),
3454 3454 # Removed since target, marked as such in working copy parent
3455 3455 (dsremoved, actions['undelete'], discard),
3456 3456 # Same as `dsremoved` but an unknown file exists at the same path
3457 3457 (dsremovunk, actions['undelete'], check),
3458 3458 ## the following sets do not result in any file changes
3459 3459 # File with no modification
3460 3460 (clean, actions['noop'], discard),
3461 3461 # Existing file, not tracked anywhere
3462 3462 (unknown, actions['unknown'], discard),
3463 3463 )
3464 3464
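# Reading one row as an example: a file in dsmodified (modified compared to
# the target and locally changed) is appended to actions['revert'] and, per
# dsmodifiedbackup, is backed up before being overwritten.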
3465 3465 for abs, (rel, exact) in sorted(names.items()):
3466 3466 # target file to be touched on disk (relative to cwd)
3467 3467 target = repo.wjoin(abs)
3468 3468 # search the entry in the dispatch table.
3469 3469 # if the file is in any of these sets, it was touched in the working
3470 3470 # directory parent and we are sure it needs to be reverted.
3471 3471 for table, (xlist, msg), dobackup in disptable:
3472 3472 if abs not in table:
3473 3473 continue
3474 3474 if xlist is not None:
3475 3475 xlist.append(abs)
3476 3476 if dobackup:
3477 3477 # If in interactive mode, don't automatically create
3478 3478 # .orig files (issue4793)
3479 3479 if dobackup == backupinteractive:
3480 3480 tobackup.add(abs)
3481 3481 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3482 3482 bakname = scmutil.origpath(ui, repo, rel)
3483 3483 ui.note(_('saving current version of %s as %s\n') %
3484 3484 (rel, bakname))
3485 3485 if not opts.get('dry_run'):
3486 3486 if interactive:
3487 3487 util.copyfile(target, bakname)
3488 3488 else:
3489 3489 util.rename(target, bakname)
3490 3490 if ui.verbose or not exact:
3491 3491 if not isinstance(msg, basestring):
3492 3492 msg = msg(abs)
3493 3493 ui.status(msg % rel)
3494 3494 elif exact:
3495 3495 ui.warn(msg % rel)
3496 3496 break
3497 3497
3498 3498 if not opts.get('dry_run'):
3499 3499 needdata = ('revert', 'add', 'undelete')
3500 3500 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3501 3501 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3502 3502
3503 3503 if targetsubs:
3504 3504 # Revert the subrepos on the revert list
3505 3505 for sub in targetsubs:
3506 3506 try:
3507 3507 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3508 3508 except KeyError:
3509 3509 raise error.Abort("subrepository '%s' does not exist in %s!"
3510 3510 % (sub, short(ctx.node())))
3511 3511
3512 3512 def _revertprefetch(repo, ctx, *files):
3513 3513 """Let extension changing the storage layer prefetch content"""
3514 3514 pass
3515 3515
3516 3516 def _performrevert(repo, parents, ctx, actions, interactive=False,
3517 3517 tobackup=None):
3518 3518 """function that actually perform all the actions computed for revert
3519 3519
3520 3520 This is an independent function to let extensions plug in and react to
3521 3521 the imminent revert.
3522 3522
3523 3523 Make sure you have the working directory locked when calling this function.
3524 3524 """
3525 3525 parent, p2 = parents
3526 3526 node = ctx.node()
3527 3527 excluded_files = []
3528 3528 matcher_opts = {"exclude": excluded_files}
3529 3529
3530 3530 def checkout(f):
3531 3531 fc = ctx[f]
3532 3532 repo.wwrite(f, fc.data(), fc.flags())
3533 3533
3534 3534 def doremove(f):
3535 3535 try:
3536 3536 repo.wvfs.unlinkpath(f)
3537 3537 except OSError:
3538 3538 pass
3539 3539 repo.dirstate.remove(f)
3540 3540
3541 audit_path = pathutil.pathauditor(repo.root)
3541 audit_path = pathutil.pathauditor(repo.root, cached=True)
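# Caching audited paths is presumably safe here: per this function's
# docstring, callers must hold the working directory lock for the whole
# revert, so audited results should not be invalidated concurrently.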
3542 3542 for f in actions['forget'][0]:
3543 3543 if interactive:
3544 3544 choice = repo.ui.promptchoice(
3545 3545 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3546 3546 if choice == 0:
3547 3547 repo.dirstate.drop(f)
3548 3548 else:
3549 3549 excluded_files.append(repo.wjoin(f))
3550 3550 else:
3551 3551 repo.dirstate.drop(f)
3552 3552 for f in actions['remove'][0]:
3553 3553 audit_path(f)
3554 3554 if interactive:
3555 3555 choice = repo.ui.promptchoice(
3556 3556 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3557 3557 if choice == 0:
3558 3558 doremove(f)
3559 3559 else:
3560 3560 excluded_files.append(repo.wjoin(f))
3561 3561 else:
3562 3562 doremove(f)
3563 3563 for f in actions['drop'][0]:
3564 3564 audit_path(f)
3565 3565 repo.dirstate.remove(f)
3566 3566
3567 3567 normal = None
3568 3568 if node == parent:
3569 3569 # We're reverting to our parent. If possible, we'd like status
3570 3570 # to report the file as clean. We have to use normallookup for
3571 3571 # merges to avoid losing information about merged/dirty files.
3572 3572 if p2 != nullid:
3573 3573 normal = repo.dirstate.normallookup
3574 3574 else:
3575 3575 normal = repo.dirstate.normal
3576 3576
3577 3577 newlyaddedandmodifiedfiles = set()
3578 3578 if interactive:
3579 3579 # Prompt the user for changes to revert
3580 3580 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3581 3581 m = scmutil.match(ctx, torevert, matcher_opts)
3582 3582 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3583 3583 diffopts.nodates = True
3584 3584 diffopts.git = True
3585 3585 operation = 'discard'
3586 3586 reversehunks = True
3587 3587 if node != parent:
3588 3588 operation = 'revert'
3589 3589 reversehunks = repo.ui.configbool('experimental',
3590 3590 'revertalternateinteractivemode')
3591 3591 if reversehunks:
3592 3592 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3593 3593 else:
3594 3594 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3595 3595 originalchunks = patch.parsepatch(diff)
3596 3596
3597 3597 try:
3598 3598
3599 3599 chunks, opts = recordfilter(repo.ui, originalchunks,
3600 3600 operation=operation)
3601 3601 if reversehunks:
3602 3602 chunks = patch.reversehunks(chunks)
3603 3603
3604 3604 except patch.PatchError as err:
3605 3605 raise error.Abort(_('error parsing patch: %s') % err)
3606 3606
3607 3607 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3608 3608 if tobackup is None:
3609 3609 tobackup = set()
3610 3610 # Apply changes
3611 3611 fp = stringio()
3612 3612 for c in chunks:
3613 3613 # Create a backup file only if this hunk should be backed up
3614 3614 if ishunk(c) and c.header.filename() in tobackup:
3615 3615 abs = c.header.filename()
3616 3616 target = repo.wjoin(abs)
3617 3617 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3618 3618 util.copyfile(target, bakname)
3619 3619 tobackup.remove(abs)
3620 3620 c.write(fp)
3621 3621 dopatch = fp.tell()
3622 3622 fp.seek(0)
3623 3623 if dopatch:
3624 3624 try:
3625 3625 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3626 3626 except patch.PatchError as err:
3627 3627 raise error.Abort(str(err))
3628 3628 del fp
3629 3629 else:
3630 3630 for f in actions['revert'][0]:
3631 3631 checkout(f)
3632 3632 if normal:
3633 3633 normal(f)
3634 3634
3635 3635 for f in actions['add'][0]:
3636 3636 # Don't check out modified files; they are already created by the diff
3637 3637 if f not in newlyaddedandmodifiedfiles:
3638 3638 checkout(f)
3639 3639 repo.dirstate.add(f)
3640 3640
3641 3641 normal = repo.dirstate.normallookup
3642 3642 if node == parent and p2 == nullid:
3643 3643 normal = repo.dirstate.normal
3644 3644 for f in actions['undelete'][0]:
3645 3645 checkout(f)
3646 3646 normal(f)
3647 3647
3648 3648 copied = copies.pathcopies(repo[parent], ctx)
3649 3649
3650 3650 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3651 3651 if f in copied:
3652 3652 repo.dirstate.copy(copied[f], f)
3653 3653
3654 3654 class command(registrar.command):
3655 3655 def _doregister(self, func, name, *args, **kwargs):
3656 3656 func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
3657 3657 return super(command, self)._doregister(func, name, *args, **kwargs)
3658 3658
3659 3659 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3660 3660 # commands.outgoing. "missing" is the "missing" attribute of the result of
3661 3661 # "findcommonoutgoing()"
3662 3662 outgoinghooks = util.hooks()
3663 3663
3664 3664 # a list of (ui, repo) functions called by commands.summary
3665 3665 summaryhooks = util.hooks()
3666 3666
3667 3667 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3668 3668 #
3669 3669 # functions should return tuple of booleans below, if 'changes' is None:
3670 3670 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3671 3671 #
3672 3672 # otherwise, 'changes' is a tuple of tuples below:
3673 3673 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3674 3674 # - (desturl, destbranch, destpeer, outgoing)
3675 3675 summaryremotehooks = util.hooks()
3676 3676
3677 3677 # A list of state files kept by multistep operations like graft.
3678 3678 # Since graft cannot be aborted, it is considered 'clearable' by update.
3679 3679 # note: bisect is intentionally excluded
3680 3680 # (state file, clearable, allowcommit, error, hint)
3681 3681 unfinishedstates = [
3682 3682 ('graftstate', True, False, _('graft in progress'),
3683 3683 _("use 'hg graft --continue' or 'hg update' to abort")),
3684 3684 ('updatestate', True, False, _('last update was interrupted'),
3685 3685 _("use 'hg update' to get a consistent checkout"))
3686 3686 ]
3687 3687
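# Extensions register their own multistep operations by appending entries;
# e.g. the rebase extension adds (shown here purely as an illustration):
#
#   ('rebasestate', False, False, _('rebase in progress'),
#    _("use 'hg rebase --continue' or 'hg rebase --abort'"))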
3688 3688 def checkunfinished(repo, commit=False):
3689 3689 '''Look for an unfinished multistep operation, like graft, and abort
3690 3690 if found. It's probably good to check this right before
3691 3691 bailifchanged().
3692 3692 '''
3693 3693 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3694 3694 if commit and allowcommit:
3695 3695 continue
3696 3696 if repo.vfs.exists(f):
3697 3697 raise error.Abort(msg, hint=hint)
3698 3698
3699 3699 def clearunfinished(repo):
3700 3700 '''Check for unfinished operations (as above), and clear the ones
3701 3701 that are clearable.
3702 3702 '''
3703 3703 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3704 3704 if not clearable and repo.vfs.exists(f):
3705 3705 raise error.Abort(msg, hint=hint)
3706 3706 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3707 3707 if clearable and repo.vfs.exists(f):
3708 3708 util.unlink(repo.vfs.join(f))
3709 3709
3710 3710 afterresolvedstates = [
3711 3711 ('graftstate',
3712 3712 _('hg graft --continue')),
3713 3713 ]
3714 3714
3715 3715 def howtocontinue(repo):
3716 3716 '''Check for an unfinished operation and return the command to finish
3717 3717 it.
3718 3718
3719 3719 afterresolvedstates tuples define a .hg/{file} and the corresponding
3720 3720 command needed to finish it.
3721 3721
3722 3722 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3723 3723 a boolean.
3724 3724 '''
3725 3725 contmsg = _("continue: %s")
3726 3726 for f, msg in afterresolvedstates:
3727 3727 if repo.vfs.exists(f):
3728 3728 return contmsg % msg, True
3729 3729 if repo[None].dirty(missing=True, merge=False, branch=False):
3730 3730 return contmsg % _("hg commit"), False
3731 3731 return None, None
3732 3732
3733 3733 def checkafterresolved(repo):
3734 3734 '''Inform the user about the next action after completing hg resolve
3735 3735
3736 3736 If there's a matching entry in afterresolvedstates, howtocontinue
3737 3737 returns warning=True and the message is printed with repo.ui.warn.
3738 3738 
3739 3739 Otherwise, it is printed with repo.ui.note.
3740 3740 '''
3741 3741 msg, warning = howtocontinue(repo)
3742 3742 if msg is not None:
3743 3743 if warning:
3744 3744 repo.ui.warn("%s\n" % msg)
3745 3745 else:
3746 3746 repo.ui.note("%s\n" % msg)
3747 3747
3748 3748 def wrongtooltocontinue(repo, task):
3749 3749 '''Raise an abort suggesting how to properly continue if there is an
3750 3750 active task.
3751 3751
3752 3752 Uses howtocontinue() to find the active task.
3753 3753
3754 3754 If howtocontinue() reports no warning (the repo.ui.note 'hg commit'
3755 3755 case), no hint is offered.
3756 3756 '''
3757 3757 after = howtocontinue(repo)
3758 3758 hint = None
3759 3759 if after[1]:
3760 3760 hint = after[0]
3761 3761 raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,1343 +1,1343
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
57 57 def nonnormalentries(dmap):
58 58 '''Compute the nonnormal dirstate entries from the dmap'''
59 59 try:
60 60 return parsers.nonnormalotherparententries(dmap)
61 61 except AttributeError:
62 62 nonnorm = set()
63 63 otherparent = set()
64 64 for fname, e in dmap.iteritems():
65 65 if e[0] != 'n' or e[3] == -1:
66 66 nonnorm.add(fname)
67 67 if e[0] == 'n' and e[2] == -2:
68 68 otherparent.add(fname)
69 69 return nonnorm, otherparent
70 70
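# dmap values are (state, mode, size, mtime) tuples. For example, an entry
# ('n', 0o644, 12, -1) is state 'n' but has an unknown mtime (-1), so it
# lands in the nonnormal set above; a size of -2 marks a file coming from
# the other merge parent.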
71 71 class dirstate(object):
72 72
73 73 def __init__(self, opener, ui, root, validate, sparsematchfn):
74 74 '''Create a new dirstate object.
75 75
76 76 opener is an open()-like callable that can be used to open the
77 77 dirstate file; root is the root of the directory tracked by
78 78 the dirstate.
79 79 '''
80 80 self._opener = opener
81 81 self._validate = validate
82 82 self._root = root
83 83 self._sparsematchfn = sparsematchfn
84 84 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
85 85 # a UNC path pointing to a root share (issue4557)
86 86 self._rootdir = pathutil.normasprefix(root)
87 87 self._dirty = False
88 88 self._dirtypl = False
89 89 self._lastnormaltime = 0
90 90 self._ui = ui
91 91 self._filecache = {}
92 92 self._parentwriters = 0
93 93 self._filename = 'dirstate'
94 94 self._pendingfilename = '%s.pending' % self._filename
95 95 self._plchangecallbacks = {}
96 96 self._origpl = None
97 97 self._updatedfiles = set()
98 98
99 99 # for consistent view between _pl() and _read() invocations
100 100 self._pendingmode = None
101 101
102 102 @contextlib.contextmanager
103 103 def parentchange(self):
104 104 '''Context manager for handling dirstate parents.
105 105
106 106 If an exception occurs in the scope of the context manager,
107 107 the incoherent dirstate won't be written when wlock is
108 108 released.
109 109 '''
110 110 self._parentwriters += 1
111 111 yield
112 112 # Typically we want the "undo" step of a context manager in a
113 113 # finally block so it happens even when an exception
114 114 # occurs. In this case, however, we only want to decrement
115 115 # parentwriters if the code in the with statement exits
116 116 # normally, so we don't have a try/finally here on purpose.
117 117 self._parentwriters -= 1
118 118
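# Editor's note (not part of the changeset): a toy standalone analogue of
# the parentchange() guard, showing why setparents() demands an active
# writer; the real dirstate raises the same ValueError seen in setparents()
# further below.
import contextlib

class toydirstate(object):
    def __init__(self):
        self._parentwriters = 0
    @contextlib.contextmanager
    def parentchange(self):
        self._parentwriters += 1
        yield
        self._parentwriters -= 1
    def setparents(self, p1):
        if self._parentwriters == 0:
            raise ValueError("cannot set parents outside parentchange()")
        self._pl = (p1,)

ds = toydirstate()
with ds.parentchange():
    ds.setparents('p1-node')  # allowed: _parentwriters > 0 inside the block
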
119 119 def beginparentchange(self):
120 120 '''Marks the beginning of a set of changes that involve changing
121 121 the dirstate parents. If there is an exception during this time,
122 122 the dirstate will not be written when the wlock is released. This
123 123 prevents writing an incoherent dirstate where the parent doesn't
124 124 match the contents.
125 125 '''
126 126 self._ui.deprecwarn('beginparentchange is obsoleted by the '
127 127 'parentchange context manager.', '4.3')
128 128 self._parentwriters += 1
129 129
130 130 def endparentchange(self):
131 131 '''Marks the end of a set of changes that involve changing the
132 132 dirstate parents. Once all parent changes have been marked done,
133 133 the wlock will be free to write the dirstate on release.
134 134 '''
135 135 self._ui.deprecwarn('endparentchange is obsoleted by the '
136 136 'parentchange context manager.', '4.3')
137 137 if self._parentwriters > 0:
138 138 self._parentwriters -= 1
139 139
140 140 def pendingparentchange(self):
141 141 '''Returns true if the dirstate is in the middle of a set of changes
142 142 that modify the dirstate parent.
143 143 '''
144 144 return self._parentwriters > 0
145 145
146 146 @propertycache
147 147 def _map(self):
148 148 '''Return the dirstate contents as a map from filename to
149 149 (state, mode, size, time).'''
150 150 self._read()
151 151 return self._map
152 152
153 153 @propertycache
154 154 def _copymap(self):
155 155 self._read()
156 156 return self._copymap
157 157
158 158 @propertycache
159 159 def _identity(self):
160 160 self._read()
161 161 return self._identity
162 162
163 163 @propertycache
164 164 def _nonnormalset(self):
165 165 nonnorm, otherparents = nonnormalentries(self._map)
166 166 self._otherparentset = otherparents
167 167 return nonnorm
168 168
169 169 @propertycache
170 170 def _otherparentset(self):
171 171 nonnorm, otherparents = nonnormalentries(self._map)
172 172 self._nonnormalset = nonnorm
173 173 return otherparents
174 174
175 175 @propertycache
176 176 def _filefoldmap(self):
177 177 try:
178 178 makefilefoldmap = parsers.make_file_foldmap
179 179 except AttributeError:
180 180 pass
181 181 else:
182 182 return makefilefoldmap(self._map, util.normcasespec,
183 183 util.normcasefallback)
184 184
185 185 f = {}
186 186 normcase = util.normcase
187 187 for name, s in self._map.iteritems():
188 188 if s[0] != 'r':
189 189 f[normcase(name)] = name
190 190 f['.'] = '.' # prevents useless util.fspath() invocation
191 191 return f
192 192
193 193 @propertycache
194 194 def _dirfoldmap(self):
195 195 f = {}
196 196 normcase = util.normcase
197 197 for name in self._dirs:
198 198 f[normcase(name)] = name
199 199 return f
200 200
201 201 @property
202 202 def _sparsematcher(self):
203 203 """The matcher for the sparse checkout.
204 204
205 205 The working directory may not include every file from a manifest. The
206 206 matcher obtained by this property will match a path if it is to be
207 207 included in the working directory.
208 208 """
209 209 # TODO there is potential to cache this property. For now, the matcher
210 210 # is resolved on every access. (But the called function does use a
211 211 # cache to keep the lookup fast.)
212 212 return self._sparsematchfn()
213 213
214 214 @repocache('branch')
215 215 def _branch(self):
216 216 try:
217 217 return self._opener.read("branch").strip() or "default"
218 218 except IOError as inst:
219 219 if inst.errno != errno.ENOENT:
220 220 raise
221 221 return "default"
222 222
223 223 @propertycache
224 224 def _pl(self):
225 225 try:
226 226 fp = self._opendirstatefile()
227 227 st = fp.read(40)
228 228 fp.close()
229 229 l = len(st)
230 230 if l == 40:
231 231 return st[:20], st[20:40]
232 232 elif l > 0 and l < 40:
233 233 raise error.Abort(_('working directory state appears damaged!'))
234 234 except IOError as err:
235 235 if err.errno != errno.ENOENT:
236 236 raise
237 237 return [nullid, nullid]
238 238
239 239 @propertycache
240 240 def _dirs(self):
241 241 return util.dirs(self._map, 'r')
242 242
243 243 def dirs(self):
244 244 return self._dirs
245 245
246 246 @rootcache('.hgignore')
247 247 def _ignore(self):
248 248 files = self._ignorefiles()
249 249 if not files:
250 250 return matchmod.never(self._root, '')
251 251
252 252 pats = ['include:%s' % f for f in files]
253 253 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
254 254
255 255 @propertycache
256 256 def _slash(self):
257 257 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
258 258
259 259 @propertycache
260 260 def _checklink(self):
261 261 return util.checklink(self._root)
262 262
263 263 @propertycache
264 264 def _checkexec(self):
265 265 return util.checkexec(self._root)
266 266
267 267 @propertycache
268 268 def _checkcase(self):
269 269 return not util.fscasesensitive(self._join('.hg'))
270 270
271 271 def _join(self, f):
272 272 # much faster than os.path.join()
273 273 # it's safe because f is always a relative path
274 274 return self._rootdir + f
275 275
276 276 def flagfunc(self, buildfallback):
277 277 if self._checklink and self._checkexec:
278 278 def f(x):
279 279 try:
280 280 st = os.lstat(self._join(x))
281 281 if util.statislink(st):
282 282 return 'l'
283 283 if util.statisexec(st):
284 284 return 'x'
285 285 except OSError:
286 286 pass
287 287 return ''
288 288 return f
289 289
290 290 fallback = buildfallback()
291 291 if self._checklink:
292 292 def f(x):
293 293 if os.path.islink(self._join(x)):
294 294 return 'l'
295 295 if 'x' in fallback(x):
296 296 return 'x'
297 297 return ''
298 298 return f
299 299 if self._checkexec:
300 300 def f(x):
301 301 if 'l' in fallback(x):
302 302 return 'l'
303 303 if util.isexec(self._join(x)):
304 304 return 'x'
305 305 return ''
306 306 return f
307 307 else:
308 308 return fallback
309 309
310 310 @propertycache
311 311 def _cwd(self):
312 312 # internal config: ui.forcecwd
313 313 forcecwd = self._ui.config('ui', 'forcecwd')
314 314 if forcecwd:
315 315 return forcecwd
316 316 return pycompat.getcwd()
317 317
318 318 def getcwd(self):
319 319 '''Return the path from which a canonical path is calculated.
320 320
321 321 This path should be used to resolve file patterns or to convert
322 322 canonical paths back to file paths for display. It shouldn't be
323 323 used to get real file paths. Use vfs functions instead.
324 324 '''
325 325 cwd = self._cwd
326 326 if cwd == self._root:
327 327 return ''
328 328 # self._root ends with a path separator if self._root is '/' or 'C:\'
329 329 rootsep = self._root
330 330 if not util.endswithsep(rootsep):
331 331 rootsep += pycompat.ossep
332 332 if cwd.startswith(rootsep):
333 333 return cwd[len(rootsep):]
334 334 else:
335 335 # we're outside the repo. return an absolute path.
336 336 return cwd
337 337
338 338 def pathto(self, f, cwd=None):
339 339 if cwd is None:
340 340 cwd = self.getcwd()
341 341 path = util.pathto(self._root, cwd, f)
342 342 if self._slash:
343 343 return util.pconvert(path)
344 344 return path
345 345
346 346 def __getitem__(self, key):
347 347 '''Return the current state of key (a filename) in the dirstate.
348 348
349 349 States are:
350 350 n normal
351 351 m needs merging
352 352 r marked for removal
353 353 a marked for addition
354 354 ? not tracked
355 355 '''
356 356 return self._map.get(key, ("?",))[0]
357 357
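# Editor's note (not part of the changeset): the .get(key, ("?",))[0] idiom
# above means an untracked file reads as '?' instead of raising KeyError;
# the values here are invented:
_map = {'tracked.txt': ('n', 0o644, 12, 1500000000)}
assert _map.get('tracked.txt', ("?",))[0] == 'n'
assert _map.get('missing.txt', ("?",))[0] == '?'
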
358 358 def __contains__(self, key):
359 359 return key in self._map
360 360
361 361 def __iter__(self):
362 362 for x in sorted(self._map):
363 363 yield x
364 364
365 365 def items(self):
366 366 return self._map.iteritems()
367 367
368 368 iteritems = items
369 369
370 370 def parents(self):
371 371 return [self._validate(p) for p in self._pl]
372 372
373 373 def p1(self):
374 374 return self._validate(self._pl[0])
375 375
376 376 def p2(self):
377 377 return self._validate(self._pl[1])
378 378
379 379 def branch(self):
380 380 return encoding.tolocal(self._branch)
381 381
382 382 def setparents(self, p1, p2=nullid):
383 383 """Set dirstate parents to p1 and p2.
384 384
385 385 When moving from two parents to one, 'm' merged entries are
386 386 adjusted to normal, and previous copy records are discarded
387 387 and returned by the call.
388 388
389 389 See localrepo.setparents()
390 390 """
391 391 if self._parentwriters == 0:
392 392 raise ValueError("cannot set dirstate parent without "
393 393 "calling dirstate.beginparentchange")
394 394
395 395 self._dirty = self._dirtypl = True
396 396 oldp2 = self._pl[1]
397 397 if self._origpl is None:
398 398 self._origpl = self._pl
399 399 self._pl = p1, p2
400 400 copies = {}
401 401 if oldp2 != nullid and p2 == nullid:
402 402 candidatefiles = self._nonnormalset.union(self._otherparentset)
403 403 for f in candidatefiles:
404 404 s = self._map.get(f)
405 405 if s is None:
406 406 continue
407 407
408 408 # Discard 'm' markers when moving away from a merge state
409 409 if s[0] == 'm':
410 410 if f in self._copymap:
411 411 copies[f] = self._copymap[f]
412 412 self.normallookup(f)
413 413 # Also fix up otherparent markers
414 414 elif s[0] == 'n' and s[2] == -2:
415 415 if f in self._copymap:
416 416 copies[f] = self._copymap[f]
417 417 self.add(f)
418 418 return copies
419 419
420 420 def setbranch(self, branch):
421 421 self._branch = encoding.fromlocal(branch)
422 422 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
423 423 try:
424 424 f.write(self._branch + '\n')
425 425 f.close()
426 426
427 427 # make sure filecache has the correct stat info for _branch after
428 428 # replacing the underlying file
429 429 ce = self._filecache['_branch']
430 430 if ce:
431 431 ce.refresh()
432 432 except: # re-raises
433 433 f.discard()
434 434 raise
435 435
436 436 def _opendirstatefile(self):
437 437 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
438 438 if self._pendingmode is not None and self._pendingmode != mode:
439 439 fp.close()
440 440 raise error.Abort(_('working directory state may be '
441 441 'changed parallelly'))
442 442 self._pendingmode = mode
443 443 return fp
444 444
445 445 def _read(self):
446 446 self._map = {}
447 447 self._copymap = {}
448 448 # ignore HG_PENDING because identity is used only for writing
449 449 self._identity = util.filestat.frompath(
450 450 self._opener.join(self._filename))
451 451 try:
452 452 fp = self._opendirstatefile()
453 453 try:
454 454 st = fp.read()
455 455 finally:
456 456 fp.close()
457 457 except IOError as err:
458 458 if err.errno != errno.ENOENT:
459 459 raise
460 460 return
461 461 if not st:
462 462 return
463 463
464 464 if util.safehasattr(parsers, 'dict_new_presized'):
465 465 # Make an estimate of the number of files in the dirstate based on
466 466 # its size. From a linear regression on a set of real-world repos,
467 467 # all over 10,000 files, the size of a dirstate entry is 85
468 468 # bytes. The cost of resizing is significantly higher than the cost
469 469 # of filling in a larger presized dict, so subtract 20% from the
470 470 # size.
471 471 #
472 472 # This heuristic is imperfect in many ways, so in a future dirstate
473 473 # format update it makes sense to just record the number of entries
474 474 # on write.
475 475 self._map = parsers.dict_new_presized(len(st) / 71)
476 476
477 477 # Python's garbage collector triggers a GC each time a certain number
478 478 # of container objects (the number being defined by
479 479 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
480 480 # for each file in the dirstate. The C version then immediately marks
481 481 # them as not to be tracked by the collector. However, this has no
482 482 # effect on when GCs are triggered, only on what objects the GC looks
483 483 # into. This means that O(number of files) GCs are unavoidable.
484 484 # Depending on when in the process's lifetime the dirstate is parsed,
485 485 # this can get very expensive. As a workaround, disable GC while
486 486 # parsing the dirstate.
487 487 #
488 488 # (we cannot decorate the function directly since it is in a C module)
489 489 parse_dirstate = util.nogc(parsers.parse_dirstate)
490 490 p = parse_dirstate(self._map, self._copymap, st)
491 491 if not self._dirtypl:
492 492 self._pl = p
493 493
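# Editor's note (not part of the changeset): a minimal sketch of the
# disable-GC-while-parsing workaround described above, built on the stdlib
# gc module; Mercurial's util.nogc may be implemented differently.
import gc

def nogc(func):
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
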
494 494 def invalidate(self):
495 495 '''Causes the next access to reread the dirstate.
496 496
497 497 This is different from localrepo.invalidatedirstate() because it always
498 498 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
499 499 check whether the dirstate has changed before rereading it.'''
500 500
501 501 for a in ("_map", "_copymap", "_identity",
502 502 "_filefoldmap", "_dirfoldmap", "_branch",
503 503 "_pl", "_dirs", "_ignore", "_nonnormalset",
504 504 "_otherparentset"):
505 505 if a in self.__dict__:
506 506 delattr(self, a)
507 507 self._lastnormaltime = 0
508 508 self._dirty = False
509 509 self._updatedfiles.clear()
510 510 self._parentwriters = 0
511 511 self._origpl = None
512 512
513 513 def copy(self, source, dest):
514 514 """Mark dest as a copy of source. Unmark dest if source is None."""
515 515 if source == dest:
516 516 return
517 517 self._dirty = True
518 518 if source is not None:
519 519 self._copymap[dest] = source
520 520 self._updatedfiles.add(source)
521 521 self._updatedfiles.add(dest)
522 522 elif dest in self._copymap:
523 523 del self._copymap[dest]
524 524 self._updatedfiles.add(dest)
525 525
526 526 def copied(self, file):
527 527 return self._copymap.get(file, None)
528 528
529 529 def copies(self):
530 530 return self._copymap
531 531
532 532 def _droppath(self, f):
533 533 if self[f] not in "?r" and "_dirs" in self.__dict__:
534 534 self._dirs.delpath(f)
535 535
536 536 if "_filefoldmap" in self.__dict__:
537 537 normed = util.normcase(f)
538 538 if normed in self._filefoldmap:
539 539 del self._filefoldmap[normed]
540 540
541 541 self._updatedfiles.add(f)
542 542
543 543 def _addpath(self, f, state, mode, size, mtime):
544 544 oldstate = self[f]
545 545 if state == 'a' or oldstate == 'r':
546 546 scmutil.checkfilename(f)
547 547 if f in self._dirs:
548 548 raise error.Abort(_('directory %r already in dirstate') % f)
549 549 # shadows
550 550 for d in util.finddirs(f):
551 551 if d in self._dirs:
552 552 break
553 553 if d in self._map and self[d] != 'r':
554 554 raise error.Abort(
555 555 _('file %r in dirstate clashes with %r') % (d, f))
556 556 if oldstate in "?r" and "_dirs" in self.__dict__:
557 557 self._dirs.addpath(f)
558 558 self._dirty = True
559 559 self._updatedfiles.add(f)
560 560 self._map[f] = dirstatetuple(state, mode, size, mtime)
561 561 if state != 'n' or mtime == -1:
562 562 self._nonnormalset.add(f)
563 563 if size == -2:
564 564 self._otherparentset.add(f)
565 565
566 566 def normal(self, f):
567 567 '''Mark a file normal and clean.'''
568 568 s = os.lstat(self._join(f))
569 569 mtime = s.st_mtime
570 570 self._addpath(f, 'n', s.st_mode,
571 571 s.st_size & _rangemask, mtime & _rangemask)
572 572 if f in self._copymap:
573 573 del self._copymap[f]
574 574 if f in self._nonnormalset:
575 575 self._nonnormalset.remove(f)
576 576 if mtime > self._lastnormaltime:
577 577 # Remember the most recent modification timeslot for status(),
578 578 # to make sure we won't miss future size-preserving file content
579 579 # modifications that happen within the same timeslot.
580 580 self._lastnormaltime = mtime
581 581
582 582 def normallookup(self, f):
583 583 '''Mark a file normal, but possibly dirty.'''
584 584 if self._pl[1] != nullid and f in self._map:
585 585 # if there is a merge going on and the file was either
586 586 # in state 'm' (-1) or coming from other parent (-2) before
587 587 # being removed, restore that state.
588 588 entry = self._map[f]
589 589 if entry[0] == 'r' and entry[2] in (-1, -2):
590 590 source = self._copymap.get(f)
591 591 if entry[2] == -1:
592 592 self.merge(f)
593 593 elif entry[2] == -2:
594 594 self.otherparent(f)
595 595 if source:
596 596 self.copy(source, f)
597 597 return
598 598 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
599 599 return
600 600 self._addpath(f, 'n', 0, -1, -1)
601 601 if f in self._copymap:
602 602 del self._copymap[f]
603 603 if f in self._nonnormalset:
604 604 self._nonnormalset.remove(f)
605 605
606 606 def otherparent(self, f):
607 607 '''Mark as coming from the other parent, always dirty.'''
608 608 if self._pl[1] == nullid:
609 609 raise error.Abort(_("setting %r to other parent "
610 610 "only allowed in merges") % f)
611 611 if f in self and self[f] == 'n':
612 612 # merge-like
613 613 self._addpath(f, 'm', 0, -2, -1)
614 614 else:
615 615 # add-like
616 616 self._addpath(f, 'n', 0, -2, -1)
617 617
618 618 if f in self._copymap:
619 619 del self._copymap[f]
620 620
621 621 def add(self, f):
622 622 '''Mark a file added.'''
623 623 self._addpath(f, 'a', 0, -1, -1)
624 624 if f in self._copymap:
625 625 del self._copymap[f]
626 626
627 627 def remove(self, f):
628 628 '''Mark a file removed.'''
629 629 self._dirty = True
630 630 self._droppath(f)
631 631 size = 0
632 632 if self._pl[1] != nullid and f in self._map:
633 633 # backup the previous state
634 634 entry = self._map[f]
635 635 if entry[0] == 'm': # merge
636 636 size = -1
637 637 elif entry[0] == 'n' and entry[2] == -2: # other parent
638 638 size = -2
639 639 self._otherparentset.add(f)
640 640 self._map[f] = dirstatetuple('r', 0, size, 0)
641 641 self._nonnormalset.add(f)
642 642 if size == 0 and f in self._copymap:
643 643 del self._copymap[f]
644 644
645 645 def merge(self, f):
646 646 '''Mark a file merged.'''
647 647 if self._pl[1] == nullid:
648 648 return self.normallookup(f)
649 649 return self.otherparent(f)
650 650
651 651 def drop(self, f):
652 652 '''Drop a file from the dirstate'''
653 653 if f in self._map:
654 654 self._dirty = True
655 655 self._droppath(f)
656 656 del self._map[f]
657 657 if f in self._nonnormalset:
658 658 self._nonnormalset.remove(f)
659 659 if f in self._copymap:
660 660 del self._copymap[f]
661 661
662 662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
663 663 if exists is None:
664 664 exists = os.path.lexists(os.path.join(self._root, path))
665 665 if not exists:
666 666 # Maybe a path component exists
667 667 if not ignoremissing and '/' in path:
668 668 d, f = path.rsplit('/', 1)
669 669 d = self._normalize(d, False, ignoremissing, None)
670 670 folded = d + "/" + f
671 671 else:
672 672 # No path components, preserve original case
673 673 folded = path
674 674 else:
675 675 # recursively normalize leading directory components
676 676 # against dirstate
677 677 if '/' in normed:
678 678 d, f = normed.rsplit('/', 1)
679 679 d = self._normalize(d, False, ignoremissing, True)
680 680 r = self._root + "/" + d
681 681 folded = d + "/" + util.fspath(f, r)
682 682 else:
683 683 folded = util.fspath(normed, self._root)
684 684 storemap[normed] = folded
685 685
686 686 return folded
687 687
688 688 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
689 689 normed = util.normcase(path)
690 690 folded = self._filefoldmap.get(normed, None)
691 691 if folded is None:
692 692 if isknown:
693 693 folded = path
694 694 else:
695 695 folded = self._discoverpath(path, normed, ignoremissing, exists,
696 696 self._filefoldmap)
697 697 return folded
698 698
699 699 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
700 700 normed = util.normcase(path)
701 701 folded = self._filefoldmap.get(normed, None)
702 702 if folded is None:
703 703 folded = self._dirfoldmap.get(normed, None)
704 704 if folded is None:
705 705 if isknown:
706 706 folded = path
707 707 else:
708 708 # store discovered result in dirfoldmap so that future
709 709 # normalizefile calls don't start matching directories
710 710 folded = self._discoverpath(path, normed, ignoremissing, exists,
711 711 self._dirfoldmap)
712 712 return folded
713 713
714 714 def normalize(self, path, isknown=False, ignoremissing=False):
715 715 '''
716 716 normalize the case of a pathname when on a casefolding filesystem
717 717
718 718 isknown specifies whether the filename came from walking the
719 719 disk, to avoid extra filesystem access.
720 720
721 721 If ignoremissing is True, missing paths are returned
722 722 unchanged. Otherwise, we try harder to normalize possibly
723 723 existing path components.
724 724
725 725 The normalized case is determined based on the following precedence:
726 726
727 727 - version of name already stored in the dirstate
728 728 - version of name stored on disk
729 729 - version provided via command arguments
730 730 '''
731 731
732 732 if self._checkcase:
733 733 return self._normalize(path, isknown, ignoremissing)
734 734 return path
735 735
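# Editor's note (not part of the changeset): a toy analogue of normalize()
# on a case-insensitive filesystem -- the user-typed spelling is folded back
# to the spelling already recorded. The mapping below stands in for the real
# foldmaps, and str.lower() stands in for util.normcase().
recorded = {'readme.txt': 'README.txt'}  # normcased -> stored spelling
def toy_normalize(path):
    return recorded.get(path.lower(), path)
assert toy_normalize('ReadMe.TXT') == 'README.txt'
assert toy_normalize('other.txt') == 'other.txt'  # unknown: unchanged
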
736 736 def clear(self):
737 737 self._map = {}
738 738 self._nonnormalset = set()
739 739 self._otherparentset = set()
740 740 if "_dirs" in self.__dict__:
741 741 delattr(self, "_dirs")
742 742 self._copymap = {}
743 743 self._pl = [nullid, nullid]
744 744 self._lastnormaltime = 0
745 745 self._updatedfiles.clear()
746 746 self._dirty = True
747 747
748 748 def rebuild(self, parent, allfiles, changedfiles=None):
749 749 if changedfiles is None:
750 750 # Rebuild entire dirstate
751 751 changedfiles = allfiles
752 752 lastnormaltime = self._lastnormaltime
753 753 self.clear()
754 754 self._lastnormaltime = lastnormaltime
755 755
756 756 if self._origpl is None:
757 757 self._origpl = self._pl
758 758 self._pl = (parent, nullid)
759 759 for f in changedfiles:
760 760 if f in allfiles:
761 761 self.normallookup(f)
762 762 else:
763 763 self.drop(f)
764 764
765 765 self._dirty = True
766 766
767 767 def identity(self):
768 768 '''Return the identity of the dirstate itself to detect changes in storage
769 769
770 770 If the identity of the previous dirstate equals this one, writing
771 771 out changes based on the former dirstate preserves consistency.
772 772 '''
773 773 return self._identity
774 774
775 775 def write(self, tr):
776 776 if not self._dirty:
777 777 return
778 778
779 779 filename = self._filename
780 780 if tr:
781 781 # 'dirstate.write()' is not only for writing in-memory
782 782 # changes out, but also for dropping ambiguous timestamps.
783 783 # Delayed writing re-raises the "ambiguous timestamp" issue.
784 784 # See also the wiki page below for detail:
785 785 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
786 786
787 787 # emulate dropping timestamp in 'parsers.pack_dirstate'
788 788 now = _getfsnow(self._opener)
789 789 dmap = self._map
790 790 for f in self._updatedfiles:
791 791 e = dmap.get(f)
792 792 if e is not None and e[0] == 'n' and e[3] == now:
793 793 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
794 794 self._nonnormalset.add(f)
795 795
796 796 # emulate that all 'dirstate.normal' results are written out
797 797 self._lastnormaltime = 0
798 798 self._updatedfiles.clear()
799 799
800 800 # delay writing in-memory changes out
801 801 tr.addfilegenerator('dirstate', (self._filename,),
802 802 self._writedirstate, location='plain')
803 803 return
804 804
805 805 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
806 806 self._writedirstate(st)
807 807
808 808 def addparentchangecallback(self, category, callback):
809 809 """add a callback to be called when the wd parents are changed
810 810
811 811 Callback will be called with the following arguments:
812 812 dirstate, (oldp1, oldp2), (newp1, newp2)
813 813
814 814 Category is a unique identifier to allow overwriting an old callback
815 815 with a newer callback.
816 816 """
817 817 self._plchangecallbacks[category] = callback
818 818
819 819 def _writedirstate(self, st):
820 820 # notify callbacks about parents change
821 821 if self._origpl is not None and self._origpl != self._pl:
822 822 for c, callback in sorted(self._plchangecallbacks.iteritems()):
823 823 callback(self, self._origpl, self._pl)
824 824 self._origpl = None
825 825 # use the modification time of the newly created temporary file as the
826 826 # filesystem's notion of 'now'
827 827 now = util.fstat(st).st_mtime & _rangemask
828 828
829 829 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
830 830 # timestamp of each entry in the dirstate, because of 'now > mtime'
831 831 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
832 832 if delaywrite > 0:
833 833 # do we have any files to delay for?
834 834 for f, e in self._map.iteritems():
835 835 if e[0] == 'n' and e[3] == now:
836 836 import time # to avoid useless import
837 837 # rather than sleep n seconds, sleep until the next
838 838 # multiple of n seconds
839 839 clock = time.time()
840 840 start = int(clock) - (int(clock) % delaywrite)
841 841 end = start + delaywrite
842 842 time.sleep(end - clock)
843 843 now = end # trust our estimate that the end is near now
844 844 break
845 845
846 846 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
847 847 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
848 848 st.close()
849 849 self._lastnormaltime = 0
850 850 self._dirty = self._dirtypl = False
851 851
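# Editor's note (not part of the changeset): the delaywrite arithmetic
# above, worked by hand -- with delaywrite = 2 and clock = 100.3 we sleep to
# the next multiple of 2 seconds, not a fixed 2 seconds.
clock = 100.3
delaywrite = 2
start = int(clock) - (int(clock) % delaywrite)  # 100
end = start + delaywrite                        # 102
assert abs((end - clock) - 1.7) < 1e-9          # sleep roughly 1.7 seconds
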
852 852 def _dirignore(self, f):
853 853 if f == '.':
854 854 return False
855 855 if self._ignore(f):
856 856 return True
857 857 for p in util.finddirs(f):
858 858 if self._ignore(p):
859 859 return True
860 860 return False
861 861
862 862 def _ignorefiles(self):
863 863 files = []
864 864 if os.path.exists(self._join('.hgignore')):
865 865 files.append(self._join('.hgignore'))
866 866 for name, path in self._ui.configitems("ui"):
867 867 if name == 'ignore' or name.startswith('ignore.'):
868 868 # we need to use os.path.join here rather than self._join
869 869 # because path is arbitrary and user-specified
870 870 files.append(os.path.join(self._rootdir, util.expandpath(path)))
871 871 return files
872 872
873 873 def _ignorefileandline(self, f):
874 874 files = collections.deque(self._ignorefiles())
875 875 visited = set()
876 876 while files:
877 877 i = files.popleft()
878 878 patterns = matchmod.readpatternfile(i, self._ui.warn,
879 879 sourceinfo=True)
880 880 for pattern, lineno, line in patterns:
881 881 kind, p = matchmod._patsplit(pattern, 'glob')
882 882 if kind == "subinclude":
883 883 if p not in visited:
884 884 files.append(p)
885 885 continue
886 886 m = matchmod.match(self._root, '', [], [pattern],
887 887 warn=self._ui.warn)
888 888 if m(f):
889 889 return (i, lineno, line)
890 890 visited.add(i)
891 891 return (None, -1, "")
892 892
893 893 def _walkexplicit(self, match, subrepos):
894 894 '''Get stat data about the files explicitly specified by match.
895 895
896 896 Return a triple (results, dirsfound, dirsnotfound).
897 897 - results is a mapping from filename to stat result. It also contains
898 898 listings mapping subrepos and .hg to None.
899 899 - dirsfound is a list of files found to be directories.
900 900 - dirsnotfound is a list of files that the dirstate thinks are
901 901 directories and that were not found.'''
902 902
903 903 def badtype(mode):
904 904 kind = _('unknown')
905 905 if stat.S_ISCHR(mode):
906 906 kind = _('character device')
907 907 elif stat.S_ISBLK(mode):
908 908 kind = _('block device')
909 909 elif stat.S_ISFIFO(mode):
910 910 kind = _('fifo')
911 911 elif stat.S_ISSOCK(mode):
912 912 kind = _('socket')
913 913 elif stat.S_ISDIR(mode):
914 914 kind = _('directory')
915 915 return _('unsupported file type (type is %s)') % kind
916 916
917 917 matchedir = match.explicitdir
918 918 badfn = match.bad
919 919 dmap = self._map
920 920 lstat = os.lstat
921 921 getkind = stat.S_IFMT
922 922 dirkind = stat.S_IFDIR
923 923 regkind = stat.S_IFREG
924 924 lnkkind = stat.S_IFLNK
925 925 join = self._join
926 926 dirsfound = []
927 927 foundadd = dirsfound.append
928 928 dirsnotfound = []
929 929 notfoundadd = dirsnotfound.append
930 930
931 931 if not match.isexact() and self._checkcase:
932 932 normalize = self._normalize
933 933 else:
934 934 normalize = None
935 935
936 936 files = sorted(match.files())
937 937 subrepos.sort()
938 938 i, j = 0, 0
939 939 while i < len(files) and j < len(subrepos):
940 940 subpath = subrepos[j] + "/"
941 941 if files[i] < subpath:
942 942 i += 1
943 943 continue
944 944 while i < len(files) and files[i].startswith(subpath):
945 945 del files[i]
946 946 j += 1
947 947
948 948 if not files or '.' in files:
949 949 files = ['.']
950 950 results = dict.fromkeys(subrepos)
951 951 results['.hg'] = None
952 952
953 953 alldirs = None
954 954 for ff in files:
955 955 # constructing the foldmap is expensive, so don't do it for the
956 956 # common case where files is ['.']
957 957 if normalize and ff != '.':
958 958 nf = normalize(ff, False, True)
959 959 else:
960 960 nf = ff
961 961 if nf in results:
962 962 continue
963 963
964 964 try:
965 965 st = lstat(join(nf))
966 966 kind = getkind(st.st_mode)
967 967 if kind == dirkind:
968 968 if nf in dmap:
969 969 # file replaced by dir on disk but still in dirstate
970 970 results[nf] = None
971 971 if matchedir:
972 972 matchedir(nf)
973 973 foundadd((nf, ff))
974 974 elif kind == regkind or kind == lnkkind:
975 975 results[nf] = st
976 976 else:
977 977 badfn(ff, badtype(kind))
978 978 if nf in dmap:
979 979 results[nf] = None
980 980 except OSError as inst: # nf not found on disk - it is dirstate only
981 981 if nf in dmap: # does it exactly match a missing file?
982 982 results[nf] = None
983 983 else: # does it match a missing directory?
984 984 if alldirs is None:
985 985 alldirs = util.dirs(dmap)
986 986 if nf in alldirs:
987 987 if matchedir:
988 988 matchedir(nf)
989 989 notfoundadd(nf)
990 990 else:
991 991 badfn(ff, inst.strerror)
992 992
993 993 # Case insensitive filesystems cannot rely on lstat() failing to detect
994 994 # a case-only rename. Prune the stat object for any file that does not
995 995 # match the case in the filesystem, if there are multiple files that
996 996 # normalize to the same path.
997 997 if match.isexact() and self._checkcase:
998 998 normed = {}
999 999
1000 1000 for f, st in results.iteritems():
1001 1001 if st is None:
1002 1002 continue
1003 1003
1004 1004 nc = util.normcase(f)
1005 1005 paths = normed.get(nc)
1006 1006
1007 1007 if paths is None:
1008 1008 paths = set()
1009 1009 normed[nc] = paths
1010 1010
1011 1011 paths.add(f)
1012 1012
1013 1013 for norm, paths in normed.iteritems():
1014 1014 if len(paths) > 1:
1015 1015 for path in paths:
1016 1016 folded = self._discoverpath(path, norm, True, None,
1017 1017 self._dirfoldmap)
1018 1018 if path != folded:
1019 1019 results[path] = None
1020 1020
1021 1021 return results, dirsfound, dirsnotfound
1022 1022
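# Editor's note (not part of the changeset): the case-only prune above, on a
# toy scale -- two explicit spellings normalize to the same path, and only
# the one matching the hypothetical on-disk case keeps its stat result.
results = {'README': 'stat-result', 'readme': 'stat-result'}
normed = {}
for f in results:
    normed.setdefault(f.lower(), set()).add(f)  # toy normcase
ondisk = 'README'  # hypothetical folded (on-disk) spelling
for paths in normed.values():
    if len(paths) > 1:
        for path in paths:
            if path != ondisk:
                results[path] = None
assert results == {'README': 'stat-result', 'readme': None}
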
1023 1023 def walk(self, match, subrepos, unknown, ignored, full=True):
1024 1024 '''
1025 1025 Walk recursively through the directory tree, finding all files
1026 1026 matched by match.
1027 1027
1028 1028 If full is False, maybe skip some known-clean files.
1029 1029
1030 1030 Return a dict mapping filename to stat-like object (either
1031 1031 mercurial.osutil.stat instance or return value of os.stat()).
1032 1032
1033 1033 '''
1034 1034 # full is a flag that extensions that hook into walk can use -- this
1035 1035 # implementation doesn't use it at all. This satisfies the contract
1036 1036 # because we only guarantee a "maybe".
1037 1037
1038 1038 if ignored:
1039 1039 ignore = util.never
1040 1040 dirignore = util.never
1041 1041 elif unknown:
1042 1042 ignore = self._ignore
1043 1043 dirignore = self._dirignore
1044 1044 else:
1045 1045 # if not unknown and not ignored, drop dir recursion and step 2
1046 1046 ignore = util.always
1047 1047 dirignore = util.always
1048 1048
1049 1049 matchfn = match.matchfn
1050 1050 matchalways = match.always()
1051 1051 matchtdir = match.traversedir
1052 1052 dmap = self._map
1053 1053 listdir = util.listdir
1054 1054 lstat = os.lstat
1055 1055 dirkind = stat.S_IFDIR
1056 1056 regkind = stat.S_IFREG
1057 1057 lnkkind = stat.S_IFLNK
1058 1058 join = self._join
1059 1059
1060 1060 exact = skipstep3 = False
1061 1061 if match.isexact(): # match.exact
1062 1062 exact = True
1063 1063 dirignore = util.always # skip step 2
1064 1064 elif match.prefix(): # match.match, no patterns
1065 1065 skipstep3 = True
1066 1066
1067 1067 if not exact and self._checkcase:
1068 1068 normalize = self._normalize
1069 1069 normalizefile = self._normalizefile
1070 1070 skipstep3 = False
1071 1071 else:
1072 1072 normalize = self._normalize
1073 1073 normalizefile = None
1074 1074
1075 1075 # step 1: find all explicit files
1076 1076 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1077 1077
1078 1078 skipstep3 = skipstep3 and not (work or dirsnotfound)
1079 1079 work = [d for d in work if not dirignore(d[0])]
1080 1080
1081 1081 # step 2: visit subdirectories
1082 1082 def traverse(work, alreadynormed):
1083 1083 wadd = work.append
1084 1084 while work:
1085 1085 nd = work.pop()
1086 1086 if not match.visitdir(nd):
1087 1087 continue
1088 1088 skip = None
1089 1089 if nd == '.':
1090 1090 nd = ''
1091 1091 else:
1092 1092 skip = '.hg'
1093 1093 try:
1094 1094 entries = listdir(join(nd), stat=True, skip=skip)
1095 1095 except OSError as inst:
1096 1096 if inst.errno in (errno.EACCES, errno.ENOENT):
1097 1097 match.bad(self.pathto(nd), inst.strerror)
1098 1098 continue
1099 1099 raise
1100 1100 for f, kind, st in entries:
1101 1101 if normalizefile:
1102 1102 # even though f might be a directory, we're only
1103 1103 # interested in comparing it to files currently in the
1104 1104 # dmap -- therefore normalizefile is enough
1105 1105 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1106 1106 True)
1107 1107 else:
1108 1108 nf = nd and (nd + "/" + f) or f
1109 1109 if nf not in results:
1110 1110 if kind == dirkind:
1111 1111 if not ignore(nf):
1112 1112 if matchtdir:
1113 1113 matchtdir(nf)
1114 1114 wadd(nf)
1115 1115 if nf in dmap and (matchalways or matchfn(nf)):
1116 1116 results[nf] = None
1117 1117 elif kind == regkind or kind == lnkkind:
1118 1118 if nf in dmap:
1119 1119 if matchalways or matchfn(nf):
1120 1120 results[nf] = st
1121 1121 elif ((matchalways or matchfn(nf))
1122 1122 and not ignore(nf)):
1123 1123 # unknown file -- normalize if necessary
1124 1124 if not alreadynormed:
1125 1125 nf = normalize(nf, False, True)
1126 1126 results[nf] = st
1127 1127 elif nf in dmap and (matchalways or matchfn(nf)):
1128 1128 results[nf] = None
1129 1129
1130 1130 for nd, d in work:
1131 1131 # alreadynormed means that traverse() doesn't have to do any
1132 1132 # expensive directory normalization
1133 1133 alreadynormed = not normalize or nd == d
1134 1134 traverse([d], alreadynormed)
1135 1135
1136 1136 for s in subrepos:
1137 1137 del results[s]
1138 1138 del results['.hg']
1139 1139
1140 1140 # step 3: visit remaining files from dmap
1141 1141 if not skipstep3 and not exact:
1142 1142 # If a dmap file is not in results yet, it was either
1143 1143 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1144 1144 # symlink directory.
1145 1145 if not results and matchalways:
1146 1146 visit = [f for f in dmap]
1147 1147 else:
1148 1148 visit = [f for f in dmap if f not in results and matchfn(f)]
1149 1149 visit.sort()
1150 1150
1151 1151 if unknown:
1152 1152 # unknown == True means we walked all dirs under the roots
1153 1153 # that weren't ignored, and everything that matched was stat'ed
1154 1154 # and is already in results.
1155 1155 # The rest must thus be ignored or under a symlink.
1156 audit_path = pathutil.pathauditor(self._root)
1156 audit_path = pathutil.pathauditor(self._root, cached=True)
1157 1157
1158 1158 for nf in iter(visit):
1159 1159 # If a stat for the same file was already added with a
1160 1160 # different case, don't add one for this, since that would
1161 1161 # make it appear as if the file exists under both names
1162 1162 # on disk.
1163 1163 if (normalizefile and
1164 1164 normalizefile(nf, True, True) in results):
1165 1165 results[nf] = None
1166 1166 # Report ignored items in the dmap as long as they are not
1167 1167 # under a symlink directory.
1168 1168 elif audit_path.check(nf):
1169 1169 try:
1170 1170 results[nf] = lstat(join(nf))
1171 1171 # file was just ignored, no links, and exists
1172 1172 except OSError:
1173 1173 # file doesn't exist
1174 1174 results[nf] = None
1175 1175 else:
1176 1176 # It's either missing or under a symlink directory
1177 1177 # which we in this case report as missing
1178 1178 results[nf] = None
1179 1179 else:
1180 1180 # We may not have walked the full directory tree above,
1181 1181 # so stat and check everything we missed.
1182 1182 iv = iter(visit)
1183 1183 for st in util.statfiles([join(i) for i in visit]):
1184 1184 results[next(iv)] = st
1185 1185 return results
1186 1186
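# Editor's note (not part of the changeset): a guess at the idea behind the
# cached=True pathauditor used in step 3 above -- remember paths that have
# already passed the audit and skip re-checking them, which assumes the
# filesystem does not change under the auditor during its (short) lifetime.
# The audit predicate here is a hypothetical stand-in.
audited = set()
def check(path, _auditone=lambda p: '.hg' not in p.split('/')):
    if path in audited:
        return True  # cache hit: skip the filesystem checks
    if _auditone(path):
        audited.add(path)
        return True
    return False

assert check('a/b.txt') and check('a/b.txt')  # second call hits the cache
assert not check('.hg/store')
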
1187 1187 def status(self, match, subrepos, ignored, clean, unknown):
1188 1188 '''Determine the status of the working copy relative to the
1189 1189 dirstate and return a pair of (unsure, status), where status is of type
1190 1190 scmutil.status and:
1191 1191
1192 1192 unsure:
1193 1193 files that might have been modified since the dirstate was
1194 1194 written, but need to be read to be sure (size is the same
1195 1195 but mtime differs)
1196 1196 status.modified:
1197 1197 files that have definitely been modified since the dirstate
1198 1198 was written (different size or mode)
1199 1199 status.clean:
1200 1200 files that have definitely not been modified since the
1201 1201 dirstate was written
1202 1202 '''
1203 1203 listignored, listclean, listunknown = ignored, clean, unknown
1204 1204 lookup, modified, added, unknown, ignored = [], [], [], [], []
1205 1205 removed, deleted, clean = [], [], []
1206 1206
1207 1207 dmap = self._map
1208 1208 ladd = lookup.append # aka "unsure"
1209 1209 madd = modified.append
1210 1210 aadd = added.append
1211 1211 uadd = unknown.append
1212 1212 iadd = ignored.append
1213 1213 radd = removed.append
1214 1214 dadd = deleted.append
1215 1215 cadd = clean.append
1216 1216 mexact = match.exact
1217 1217 dirignore = self._dirignore
1218 1218 checkexec = self._checkexec
1219 1219 copymap = self._copymap
1220 1220 lastnormaltime = self._lastnormaltime
1221 1221
1222 1222 # We need to do full walks when either
1223 1223 # - we're listing all clean files, or
1224 1224 # - match.traversedir does something, because match.traversedir should
1225 1225 # be called for every dir in the working dir
1226 1226 full = listclean or match.traversedir is not None
1227 1227 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1228 1228 full=full).iteritems():
1229 1229 if fn not in dmap:
1230 1230 if (listignored or mexact(fn)) and dirignore(fn):
1231 1231 if listignored:
1232 1232 iadd(fn)
1233 1233 else:
1234 1234 uadd(fn)
1235 1235 continue
1236 1236
1237 1237 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1238 1238 # written like that for performance reasons. dmap[fn] is not a
1239 1239 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1240 1240 # opcode has fast paths when the value to be unpacked is a tuple or
1241 1241 # a list, but falls back to creating a full-fledged iterator in
1242 1242 # general. That is much slower than simply accessing and storing the
1243 1243 # tuple members one by one.
1244 1244 t = dmap[fn]
1245 1245 state = t[0]
1246 1246 mode = t[1]
1247 1247 size = t[2]
1248 1248 time = t[3]
1249 1249
1250 1250 if not st and state in "nma":
1251 1251 dadd(fn)
1252 1252 elif state == 'n':
1253 1253 if (size >= 0 and
1254 1254 ((size != st.st_size and size != st.st_size & _rangemask)
1255 1255 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1256 1256 or size == -2 # other parent
1257 1257 or fn in copymap):
1258 1258 madd(fn)
1259 1259 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1260 1260 ladd(fn)
1261 1261 elif st.st_mtime == lastnormaltime:
1262 1262 # fn may have just been marked as normal and it may have
1263 1263 # changed in the same second without changing its size.
1264 1264 # This can happen if we quickly do multiple commits.
1265 1265 # Force lookup, so we don't miss such a racy file change.
1266 1266 ladd(fn)
1267 1267 elif listclean:
1268 1268 cadd(fn)
1269 1269 elif state == 'm':
1270 1270 madd(fn)
1271 1271 elif state == 'a':
1272 1272 aadd(fn)
1273 1273 elif state == 'r':
1274 1274 radd(fn)
1275 1275
1276 1276 return (lookup, scmutil.status(modified, added, removed, deleted,
1277 1277 unknown, ignored, clean))
1278 1278
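# Editor's note (not part of the changeset): the _rangemask size comparison
# in status() above, worked by hand -- sizes are stored masked to 31 bits,
# so a file over 2 GiB is only considered unchanged because the fresh stat
# size is masked before the second comparison.
_rangemask = 0x7fffffff
st_size = 3 * 1024 ** 3                 # fresh stat: 3 GiB, > 31 bits
size = st_size & _rangemask             # what the dirstate recorded
flagged = size != st_size and size != (st_size & _rangemask)
assert size != st_size and not flagged  # masked compare rescues the file
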
1279 1279 def matches(self, match):
1280 1280 '''
1281 1281 return files in the dirstate (in whatever state) filtered by match
1282 1282 '''
1283 1283 dmap = self._map
1284 1284 if match.always():
1285 1285 return dmap.keys()
1286 1286 files = match.files()
1287 1287 if match.isexact():
1288 1288 # fast path -- filter the other way around, since typically files is
1289 1289 # much smaller than dmap
1290 1290 return [f for f in files if f in dmap]
1291 1291 if match.prefix() and all(fn in dmap for fn in files):
1292 1292 # fast path -- all the values are known to be files, so just return
1293 1293 # that
1294 1294 return list(files)
1295 1295 return [f for f in dmap if match(f)]
1296 1296
1297 1297 def _actualfilename(self, tr):
1298 1298 if tr:
1299 1299 return self._pendingfilename
1300 1300 else:
1301 1301 return self._filename
1302 1302
1303 1303 def savebackup(self, tr, backupname):
1304 1304 '''Save current dirstate into backup file'''
1305 1305 filename = self._actualfilename(tr)
1306 1306 assert backupname != filename
1307 1307
1308 1308 # use '_writedirstate' instead of 'write' to be sure changes are written
1309 1309 # out, because the latter skips writing while a transaction is running.
1310 1310 # The output file will be used to create a backup of the dirstate here.
1311 1311 if self._dirty or not self._opener.exists(filename):
1312 1312 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1313 1313 checkambig=True))
1314 1314
1315 1315 if tr:
1316 1316 # ensure that subsequent tr.writepending returns True for
1317 1317 # changes written out above, even if dirstate is never
1318 1318 # changed after this
1319 1319 tr.addfilegenerator('dirstate', (self._filename,),
1320 1320 self._writedirstate, location='plain')
1321 1321
1322 1322 # ensure that pending file written above is unlinked at
1323 1323 # failure, even if tr.writepending isn't invoked until the
1324 1324 # end of this transaction
1325 1325 tr.registertmp(filename, location='plain')
1326 1326
1327 1327 self._opener.tryunlink(backupname)
1328 1328 # hardlink backup is okay because _writedirstate is always called
1329 1329 # with an "atomictemp=True" file.
1330 1330 util.copyfile(self._opener.join(filename),
1331 1331 self._opener.join(backupname), hardlink=True)
1332 1332
1333 1333 def restorebackup(self, tr, backupname):
1334 1334 '''Restore dirstate by backup file'''
1335 1335 # this "invalidate()" prevents "wlock.release()" from writing
1336 1336 # dirstate changes out after restoring from the backup file
1337 1337 self.invalidate()
1338 1338 filename = self._actualfilename(tr)
1339 1339 self._opener.rename(backupname, filename, checkambig=True)
1340 1340
1341 1341 def clearbackup(self, tr, backupname):
1342 1342 '''Clear backup file'''
1343 1343 self._opener.unlink(backupname)
@@ -1,2262 +1,2263 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 mergeutil,
45 45 namespaces,
46 46 obsolete,
47 47 pathutil,
48 48 peer,
49 49 phases,
50 50 pushkey,
51 51 pycompat,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 sparse,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 # set of (path, vfs-location) tuples. vfs-location is:
71 71 # - 'plain' for vfs relative paths
72 72 # - '' for svfs relative paths
73 73 _cachedfiles = set()
74 74
75 75 class _basefilecache(scmutil.filecache):
76 76 """All filecache usage on repo are done for logic that should be unfiltered
77 77 """
78 78 def __get__(self, repo, type=None):
79 79 if repo is None:
80 80 return self
81 81 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
82 82 def __set__(self, repo, value):
83 83 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
84 84 def __delete__(self, repo):
85 85 return super(_basefilecache, self).__delete__(repo.unfiltered())
86 86
87 87 class repofilecache(_basefilecache):
88 88 """filecache for files in .hg but outside of .hg/store"""
89 89 def __init__(self, *paths):
90 90 super(repofilecache, self).__init__(*paths)
91 91 for path in paths:
92 92 _cachedfiles.add((path, 'plain'))
93 93
94 94 def join(self, obj, fname):
95 95 return obj.vfs.join(fname)
96 96
97 97 class storecache(_basefilecache):
98 98 """filecache for files in the store"""
99 99 def __init__(self, *paths):
100 100 super(storecache, self).__init__(*paths)
101 101 for path in paths:
102 102 _cachedfiles.add((path, ''))
103 103
104 104 def join(self, obj, fname):
105 105 return obj.sjoin(fname)
106 106
107 107 def isfilecached(repo, name):
108 108 """check if a repo has already cached "name" filecache-ed property
109 109
110 110 This returns (cachedobj-or-None, iscached) tuple.
111 111 """
112 112 cacheentry = repo.unfiltered()._filecache.get(name, None)
113 113 if not cacheentry:
114 114 return None, False
115 115 return cacheentry.obj, True
116 116
117 117 class unfilteredpropertycache(util.propertycache):
118 118 """propertycache that apply to unfiltered repo only"""
119 119
120 120 def __get__(self, repo, type=None):
121 121 unfi = repo.unfiltered()
122 122 if unfi is repo:
123 123 return super(unfilteredpropertycache, self).__get__(unfi)
124 124 return getattr(unfi, self.name)
125 125
126 126 class filteredpropertycache(util.propertycache):
127 127 """propertycache that must take filtering in account"""
128 128
129 129 def cachevalue(self, obj, value):
130 130 object.__setattr__(obj, self.name, value)
131 131
132 132
133 133 def hasunfilteredcache(repo, name):
134 134 """check if a repo has an unfilteredpropertycache value for <name>"""
135 135 return name in vars(repo.unfiltered())
136 136
137 137 def unfilteredmethod(orig):
138 138 """decorate method that always need to be run on unfiltered version"""
139 139 def wrapper(repo, *args, **kwargs):
140 140 return orig(repo.unfiltered(), *args, **kwargs)
141 141 return wrapper
142 142
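# Editor's note (not part of the changeset): a hypothetical method that must
# see filtered-out revisions too would be wrapped with the decorator above,
# e.g.:
#
#   @unfilteredmethod
#   def destroying(self):
#       pass  # runs with repo.unfiltered() as the receiver
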
143 143 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
144 144 'unbundle'}
145 145 legacycaps = moderncaps.union({'changegroupsubset'})
146 146
147 147 class localpeer(peer.peerrepository):
148 148 '''peer for a local repo; reflects only the most recent API'''
149 149
150 150 def __init__(self, repo, caps=None):
151 151 if caps is None:
152 152 caps = moderncaps.copy()
153 153 peer.peerrepository.__init__(self)
154 154 self._repo = repo.filtered('served')
155 155 self.ui = repo.ui
156 156 self._caps = repo._restrictcapabilities(caps)
157 157 self.requirements = repo.requirements
158 158 self.supportedformats = repo.supportedformats
159 159
160 160 def close(self):
161 161 self._repo.close()
162 162
163 163 def _capabilities(self):
164 164 return self._caps
165 165
166 166 def local(self):
167 167 return self._repo
168 168
169 169 def canpush(self):
170 170 return True
171 171
172 172 def url(self):
173 173 return self._repo.url()
174 174
175 175 def lookup(self, key):
176 176 return self._repo.lookup(key)
177 177
178 178 def branchmap(self):
179 179 return self._repo.branchmap()
180 180
181 181 def heads(self):
182 182 return self._repo.heads()
183 183
184 184 def known(self, nodes):
185 185 return self._repo.known(nodes)
186 186
187 187 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
188 188 **kwargs):
189 189 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
190 190 common=common, bundlecaps=bundlecaps,
191 191 **kwargs)
192 192 cb = util.chunkbuffer(chunks)
193 193
194 194 if exchange.bundle2requested(bundlecaps):
195 195 # When requesting a bundle2, getbundle returns a stream to make the
196 196 # wire level function happier. We need to build a proper object
197 197 # from it in local peer.
198 198 return bundle2.getunbundler(self.ui, cb)
199 199 else:
200 200 return changegroup.getunbundler('01', cb, None)
201 201
202 202 # TODO We might want to move the next two calls into legacypeer and add
203 203 # unbundle instead.
204 204
205 205 def unbundle(self, cg, heads, url):
206 206 """apply a bundle on a repo
207 207
208 208 This function handles the repo locking itself."""
209 209 try:
210 210 try:
211 211 cg = exchange.readbundle(self.ui, cg, None)
212 212 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
213 213 if util.safehasattr(ret, 'getchunks'):
214 214 # This is a bundle20 object, turn it into an unbundler.
215 215 # This little dance should be dropped eventually when the
216 216 # API is finally improved.
217 217 stream = util.chunkbuffer(ret.getchunks())
218 218 ret = bundle2.getunbundler(self.ui, stream)
219 219 return ret
220 220 except Exception as exc:
221 221 # If the exception contains output salvaged from a bundle2
222 222 # reply, we need to make sure it is printed before continuing
223 223 # to fail. So we build a bundle2 with such output and consume
224 224 # it directly.
225 225 #
226 226 # This is not very elegant but allows a "simple" solution for
227 227 # issue4594
228 228 output = getattr(exc, '_bundle2salvagedoutput', ())
229 229 if output:
230 230 bundler = bundle2.bundle20(self._repo.ui)
231 231 for out in output:
232 232 bundler.addpart(out)
233 233 stream = util.chunkbuffer(bundler.getchunks())
234 234 b = bundle2.getunbundler(self.ui, stream)
235 235 bundle2.processbundle(self._repo, b)
236 236 raise
237 237 except error.PushRaced as exc:
238 238 raise error.ResponseError(_('push failed:'), str(exc))
239 239
240 240 def lock(self):
241 241 return self._repo.lock()
242 242
243 243 def pushkey(self, namespace, key, old, new):
244 244 return self._repo.pushkey(namespace, key, old, new)
245 245
246 246 def listkeys(self, namespace):
247 247 return self._repo.listkeys(namespace)
248 248
249 249 def debugwireargs(self, one, two, three=None, four=None, five=None):
250 250 '''used to test argument passing over the wire'''
251 251 return "%s %s %s %s %s" % (one, two, three, four, five)
252 252
253 253 class locallegacypeer(localpeer):
254 254 '''peer extension which implements legacy methods too; used for tests with
255 255 restricted capabilities'''
256 256
257 257 def __init__(self, repo):
258 258 localpeer.__init__(self, repo, caps=legacycaps)
259 259
260 260 def branches(self, nodes):
261 261 return self._repo.branches(nodes)
262 262
263 263 def between(self, pairs):
264 264 return self._repo.between(pairs)
265 265
266 266 def changegroup(self, basenodes, source):
267 267 return changegroup.changegroup(self._repo, basenodes, source)
268 268
269 269 def changegroupsubset(self, bases, heads, source):
270 270 return changegroup.changegroupsubset(self._repo, bases, heads, source)
271 271
272 272 # Increment the sub-version when the revlog v2 format changes to lock out old
273 273 # clients.
274 274 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
275 275
276 276 class localrepository(object):
277 277
278 278 supportedformats = {
279 279 'revlogv1',
280 280 'generaldelta',
281 281 'treemanifest',
282 282 'manifestv2',
283 283 REVLOGV2_REQUIREMENT,
284 284 }
285 285 _basesupported = supportedformats | {
286 286 'store',
287 287 'fncache',
288 288 'shared',
289 289 'relshared',
290 290 'dotencode',
291 291 'exp-sparse',
292 292 }
293 293 openerreqs = {
294 294 'revlogv1',
295 295 'generaldelta',
296 296 'treemanifest',
297 297 'manifestv2',
298 298 }
299 299
300 300 # a list of (ui, featureset) functions.
301 301 # only functions defined in module of enabled extensions are invoked
302 302 featuresetupfuncs = set()
303 303
304 304 # list of prefixes of files which can be written without 'wlock'
305 305 # Extensions should extend this list when needed
306 306 _wlockfreeprefix = {
307 307 # We might consider requiring 'wlock' for the next
308 308 # two, but pretty much all the existing code assumes
309 309 # wlock is not needed, so we keep them excluded for
310 310 # now.
311 311 'hgrc',
312 312 'requires',
313 313 # XXX cache is a complicated business; someone
314 314 # should investigate this in depth at some point
315 315 'cache/',
316 316 # XXX shouldn't be dirstate covered by the wlock?
317 317 'dirstate',
318 318 # XXX bisect was still a bit too messy at the time
319 319 # this changeset was introduced. Someone should fix
320 320 # the remaining bit and drop this line
321 321 'bisect.state',
322 322 }
323 323
324 324 def __init__(self, baseui, path, create=False):
325 325 self.requirements = set()
326 326 self.filtername = None
327 327 # wvfs: rooted at the repository root, used to access the working copy
328 328 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
329 329 # vfs: rooted at .hg, used to access repo files outside of .hg/store
330 330 self.vfs = None
331 331 # svfs: usually rooted at .hg/store, used to access repository history
332 332 # If this is a shared repository, this vfs may point to another
333 333 # repository's .hg/store directory.
334 334 self.svfs = None
335 335 self.root = self.wvfs.base
336 336 self.path = self.wvfs.join(".hg")
337 337 self.origroot = path
339 339 # These auditors are not used by the vfs; at the time of
340 340 # writing this comment, the only user is basectx.match
340 340 self.auditor = pathutil.pathauditor(self.root, self._checknested)
341 341 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
342 realfs=False)
342 realfs=False, cached=True)
343 343 self.baseui = baseui
344 344 self.ui = baseui.copy()
345 345 self.ui.copy = baseui.copy # prevent copying repo configuration
346 self.vfs = vfsmod.vfs(self.path)
346 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
347 347 if (self.ui.configbool('devel', 'all-warnings') or
348 348 self.ui.configbool('devel', 'check-locks')):
349 349 self.vfs.audit = self._getvfsward(self.vfs.audit)
350 350 # A list of callback to shape the phase if no data were found.
351 351 # Callback are in the form: func(repo, roots) --> processed root.
352 352 # This list is to be filled by extensions during repo setup
353 353 self._phasedefaults = []
354 354 try:
355 355 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
356 356 self._loadextensions()
357 357 except IOError:
358 358 pass
359 359
360 360 if self.featuresetupfuncs:
361 361 self.supported = set(self._basesupported) # use private copy
362 362 extmods = set(m.__name__ for n, m
363 363 in extensions.extensions(self.ui))
364 364 for setupfunc in self.featuresetupfuncs:
365 365 if setupfunc.__module__ in extmods:
366 366 setupfunc(self.ui, self.supported)
367 367 else:
368 368 self.supported = self._basesupported
369 369 color.setup(self.ui)
370 370
371 371 # Add compression engines.
372 372 for name in util.compengines:
373 373 engine = util.compengines[name]
374 374 if engine.revlogheader():
375 375 self.supported.add('exp-compression-%s' % name)
376 376
377 377 if not self.vfs.isdir():
378 378 if create:
379 379 self.requirements = newreporequirements(self)
380 380
381 381 if not self.wvfs.exists():
382 382 self.wvfs.makedirs()
383 383 self.vfs.makedir(notindexed=True)
384 384
385 385 if 'store' in self.requirements:
386 386 self.vfs.mkdir("store")
387 387
388 388 # create an invalid changelog
389 389 self.vfs.append(
390 390 "00changelog.i",
391 391 '\0\0\0\2' # represents revlogv2
392 392 ' dummy changelog to prevent using the old repo layout'
393 393 )
394 394 else:
395 395 raise error.RepoError(_("repository %s not found") % path)
396 396 elif create:
397 397 raise error.RepoError(_("repository %s already exists") % path)
398 398 else:
399 399 try:
400 400 self.requirements = scmutil.readrequires(
401 401 self.vfs, self.supported)
402 402 except IOError as inst:
403 403 if inst.errno != errno.ENOENT:
404 404 raise
405 405
406 406 cachepath = self.vfs.join('cache')
407 407 self.sharedpath = self.path
408 408 try:
409 409 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
410 410 if 'relshared' in self.requirements:
411 411 sharedpath = self.vfs.join(sharedpath)
412 412 vfs = vfsmod.vfs(sharedpath, realpath=True)
413 413 cachepath = vfs.join('cache')
414 414 s = vfs.base
415 415 if not vfs.exists():
416 416 raise error.RepoError(
417 417 _('.hg/sharedpath points to nonexistent directory %s') % s)
418 418 self.sharedpath = s
419 419 except IOError as inst:
420 420 if inst.errno != errno.ENOENT:
421 421 raise
422 422
423 423 if 'exp-sparse' in self.requirements and not sparse.enabled:
424 424 raise error.RepoError(_('repository is using sparse feature but '
425 425 'sparse is not enabled; enable the '
426 426 '"sparse" extension to access'))
427 427
428 428 self.store = store.store(
429 self.requirements, self.sharedpath, vfsmod.vfs)
429 self.requirements, self.sharedpath,
430 lambda base: vfsmod.vfs(base, cacheaudited=True))
430 431 self.spath = self.store.path
431 432 self.svfs = self.store.vfs
432 433 self.sjoin = self.store.join
433 434 self.vfs.createmode = self.store.createmode
434 self.cachevfs = vfsmod.vfs(cachepath)
435 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
435 436 self.cachevfs.createmode = self.store.createmode
436 437 if (self.ui.configbool('devel', 'all-warnings') or
437 438 self.ui.configbool('devel', 'check-locks')):
438 439 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
439 440 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
440 441 else: # standard vfs
441 442 self.svfs.audit = self._getsvfsward(self.svfs.audit)
442 443 self._applyopenerreqs()
443 444 if create:
444 445 self._writerequirements()
445 446
446 447 self._dirstatevalidatewarned = False
447 448
448 449 self._branchcaches = {}
449 450 self._revbranchcache = None
450 451 self.filterpats = {}
451 452 self._datafilters = {}
452 453 self._transref = self._lockref = self._wlockref = None
453 454
454 455 # A cache for various files under .hg/ that tracks file changes
455 456 # (used by the filecache decorator)
456 457 #
457 458 # Maps a property name to its util.filecacheentry
458 459 self._filecache = {}
459 460
460 461 # hold sets of revisions to be filtered
461 462 # should be cleared when something might have changed the filter value:
462 463 # - new changesets,
463 464 # - phase change,
464 465 # - new obsolescence marker,
465 466 # - working directory parent change,
466 467 # - bookmark changes
467 468 self.filteredrevcache = {}
468 469
469 470 # post-dirstate-status hooks
470 471 self._postdsstatus = []
471 472
472 473 # Cache of types representing filtered repos.
473 474 self._filteredrepotypes = weakref.WeakKeyDictionary()
474 475
475 476 # generic mapping between names and nodes
476 477 self.names = namespaces.namespaces()
477 478
478 479 # Key to signature value.
479 480 self._sparsesignaturecache = {}
480 481 # Signature to cached matcher instance.
481 482 self._sparsematchercache = {}
482 483
483 484 def _getvfsward(self, origfunc):
484 485 """build a ward for self.vfs"""
485 486 rref = weakref.ref(self)
486 487 def checkvfs(path, mode=None):
487 488 ret = origfunc(path, mode=mode)
488 489 repo = rref()
489 490 if (repo is None
490 491 or not util.safehasattr(repo, '_wlockref')
491 492 or not util.safehasattr(repo, '_lockref')):
492 493 return
493 494 if mode in (None, 'r', 'rb'):
494 495 return
495 496 if path.startswith(repo.path):
496 497 # truncate name relative to the repository (.hg)
497 498 path = path[len(repo.path) + 1:]
498 499 if path.startswith('cache/'):
499 500 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
500 501 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
501 502 if path.startswith('journal.'):
502 503 # journal is covered by 'lock'
503 504 if repo._currentlock(repo._lockref) is None:
504 505 repo.ui.develwarn('write with no lock: "%s"' % path,
505 506 stacklevel=2, config='check-locks')
506 507 elif repo._currentlock(repo._wlockref) is None:
507 508 # rest of vfs files are covered by 'wlock'
508 509 #
509 510 # exclude special files
510 511 for prefix in self._wlockfreeprefix:
511 512 if path.startswith(prefix):
512 513 return
513 514 repo.ui.develwarn('write with no wlock: "%s"' % path,
514 515 stacklevel=2, config='check-locks')
515 516 return ret
516 517 return checkvfs
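# Illustrative sketch (not part of this module; all names hypothetical):
# the essence of the ward above is wrapping a callable while holding only
# a weak reference to its owner, so the instrumentation cannot create a
# reference cycle that keeps the repository object alive:
#
#   import weakref
#
#   def makeward(owner, origfunc):
#       ref = weakref.ref(owner)
#       def ward(path, mode=None):
#           ret = origfunc(path, mode=mode)
#           obj = ref()  # None once the owner has been collected
#           if obj is not None and mode not in (None, 'r', 'rb'):
#               if not obj.locked:
#                   print('write with no lock: "%s"' % path)
#           return ret
#       return ward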
517 518
518 519 def _getsvfsward(self, origfunc):
519 520 """build a ward for self.svfs"""
520 521 rref = weakref.ref(self)
521 522 def checksvfs(path, mode=None):
522 523 ret = origfunc(path, mode=mode)
523 524 repo = rref()
524 525 if repo is None or not util.safehasattr(repo, '_lockref'):
525 526 return
526 527 if mode in (None, 'r', 'rb'):
527 528 return
528 529 if path.startswith(repo.sharedpath):
529 530 # truncate name relative to the repository (.hg)
530 531 path = path[len(repo.sharedpath) + 1:]
531 532 if repo._currentlock(repo._lockref) is None:
532 533 repo.ui.develwarn('write with no lock: "%s"' % path,
533 534 stacklevel=3)
534 535 return ret
535 536 return checksvfs
536 537
537 538 def close(self):
538 539 self._writecaches()
539 540
540 541 def _loadextensions(self):
541 542 extensions.loadall(self.ui)
542 543
543 544 def _writecaches(self):
544 545 if self._revbranchcache:
545 546 self._revbranchcache.write()
546 547
547 548 def _restrictcapabilities(self, caps):
548 549 if self.ui.configbool('experimental', 'bundle2-advertise'):
549 550 caps = set(caps)
550 551 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
551 552 caps.add('bundle2=' + urlreq.quote(capsblob))
552 553 return caps
553 554
554 555 def _applyopenerreqs(self):
555 556 self.svfs.options = dict((r, 1) for r in self.requirements
556 557 if r in self.openerreqs)
557 558 # experimental config: format.chunkcachesize
558 559 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
559 560 if chunkcachesize is not None:
560 561 self.svfs.options['chunkcachesize'] = chunkcachesize
561 562 # experimental config: format.maxchainlen
562 563 maxchainlen = self.ui.configint('format', 'maxchainlen')
563 564 if maxchainlen is not None:
564 565 self.svfs.options['maxchainlen'] = maxchainlen
565 566 # experimental config: format.manifestcachesize
566 567 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
567 568 if manifestcachesize is not None:
568 569 self.svfs.options['manifestcachesize'] = manifestcachesize
569 570 # experimental config: format.aggressivemergedeltas
570 571 aggressivemergedeltas = self.ui.configbool('format',
571 572 'aggressivemergedeltas')
572 573 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
573 574 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
574 575 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
575 576 if 0 <= chainspan:
576 577 self.svfs.options['maxdeltachainspan'] = chainspan
577 578
578 579 for r in self.requirements:
579 580 if r.startswith('exp-compression-'):
580 581 self.svfs.options['compengine'] = r[len('exp-compression-'):]
581 582
582 583 # TODO move "revlogv2" to openerreqs once finalized.
583 584 if REVLOGV2_REQUIREMENT in self.requirements:
584 585 self.svfs.options['revlogv2'] = True
585 586
586 587 def _writerequirements(self):
587 588 scmutil.writerequires(self.vfs, self.requirements)
588 589
589 590 def _checknested(self, path):
590 591 """Determine if path is a legal nested repository."""
591 592 if not path.startswith(self.root):
592 593 return False
593 594 subpath = path[len(self.root) + 1:]
594 595 normsubpath = util.pconvert(subpath)
595 596
596 597 # XXX: Checking against the current working copy is wrong in
597 598 # the sense that it can reject things like
598 599 #
599 600 # $ hg cat -r 10 sub/x.txt
600 601 #
601 602 # if sub/ is no longer a subrepository in the working copy
602 603 # parent revision.
603 604 #
604 605 # However, it can of course also allow things that would have
605 606 # been rejected before, such as the above cat command if sub/
606 607 # is a subrepository now, but was a normal directory before.
607 608 # The old path auditor would have rejected by mistake since it
608 609 # panics when it sees sub/.hg/.
609 610 #
610 611 # All in all, checking against the working copy seems sensible
611 612 # since we want to prevent access to nested repositories on
612 613 # the filesystem *now*.
613 614 ctx = self[None]
614 615 parts = util.splitpath(subpath)
615 616 while parts:
616 617 prefix = '/'.join(parts)
617 618 if prefix in ctx.substate:
618 619 if prefix == normsubpath:
619 620 return True
620 621 else:
621 622 sub = ctx.sub(prefix)
622 623 return sub.checknested(subpath[len(prefix) + 1:])
623 624 else:
624 625 parts.pop()
625 626 return False
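# Illustrative sketch (plain data structures, hypothetical names): the loop
# above walks path prefixes from the deepest to the shallowest and stops at
# the first one that is a known subrepository:
#
#   def findsub(substate, subpath):
#       parts = subpath.split('/')
#       while parts:
#           prefix = '/'.join(parts)
#           if prefix in substate:
#               return prefix  # deepest enclosing subrepo prefix
#           parts.pop()
#       return None
#
#   findsub({'sub': True}, 'sub/dir/f.txt')  # -> 'sub'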
626 627
627 628 def peer(self):
628 629 return localpeer(self) # not cached to avoid reference cycle
629 630
630 631 def unfiltered(self):
631 632 """Return unfiltered version of the repository
632 633
633 634 Intended to be overwritten by filtered repo."""
634 635 return self
635 636
636 637 def filtered(self, name):
637 638 """Return a filtered version of a repository"""
638 639 # Python <3.4 easily leaks types via __mro__. See
639 640 # https://bugs.python.org/issue17950. We cache dynamically
640 641 # created types so this method doesn't leak on every
641 642 # invocation.
642 643
643 644 key = self.unfiltered().__class__
644 645 if key not in self._filteredrepotypes:
645 646 # Build a new type with the repoview mixin and the base
646 647 # class of this repo. Give it a name containing the
647 648 # filter name to aid debugging.
648 649 bases = (repoview.repoview, key)
649 650 cls = type(r'%sfilteredrepo' % name, bases, {})
650 651 self._filteredrepotypes[key] = cls
651 652
652 653 return self._filteredrepotypes[key](self, name)
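# Illustrative sketch (hypothetical names): caching dynamically created
# classes, keyed by the base type, so repeated calls reuse one type object
# instead of leaking a fresh one per invocation:
#
#   import weakref
#
#   _typecache = weakref.WeakKeyDictionary()
#
#   def viewclass(base, mixin, name):
#       if base not in _typecache:
#           _typecache[base] = type('%sview' % name, (mixin, base), {})
#       return _typecache[base]
#
# As in the method above, the name only matters when the type is first
# created; the cache key is the base class alone.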
653 654
654 655 @repofilecache('bookmarks', 'bookmarks.current')
655 656 def _bookmarks(self):
656 657 return bookmarks.bmstore(self)
657 658
658 659 @property
659 660 def _activebookmark(self):
660 661 return self._bookmarks.active
661 662
662 663 # _phaserevs and _phasesets depend on changelog. What we need is to
663 664 # call _phasecache.invalidate() if '00changelog.i' was changed, but that
664 665 # can't be easily expressed in the filecache mechanism.
665 666 @storecache('phaseroots', '00changelog.i')
666 667 def _phasecache(self):
667 668 return phases.phasecache(self, self._phasedefaults)
668 669
669 670 @storecache('obsstore')
670 671 def obsstore(self):
671 672 return obsolete.makestore(self.ui, self)
672 673
673 674 @storecache('00changelog.i')
674 675 def changelog(self):
675 676 return changelog.changelog(self.svfs,
676 677 trypending=txnutil.mayhavepending(self.root))
677 678
678 679 def _constructmanifest(self):
679 680 # This is a temporary function while we migrate from manifest to
680 681 # manifestlog. It allows bundlerepo and unionrepo to intercept the
681 682 # manifest creation.
682 683 return manifest.manifestrevlog(self.svfs)
683 684
684 685 @storecache('00manifest.i')
685 686 def manifestlog(self):
686 687 return manifest.manifestlog(self.svfs, self)
687 688
688 689 @repofilecache('dirstate')
689 690 def dirstate(self):
690 691 sparsematchfn = lambda: sparse.matcher(self)
691 692
692 693 return dirstate.dirstate(self.vfs, self.ui, self.root,
693 694 self._dirstatevalidate, sparsematchfn)
694 695
695 696 def _dirstatevalidate(self, node):
696 697 try:
697 698 self.changelog.rev(node)
698 699 return node
699 700 except error.LookupError:
700 701 if not self._dirstatevalidatewarned:
701 702 self._dirstatevalidatewarned = True
702 703 self.ui.warn(_("warning: ignoring unknown"
703 704 " working parent %s!\n") % short(node))
704 705 return nullid
705 706
706 707 def __getitem__(self, changeid):
707 708 if changeid is None:
708 709 return context.workingctx(self)
709 710 if isinstance(changeid, slice):
710 711 # wdirrev isn't contiguous so the slice shouldn't include it
711 712 return [context.changectx(self, i)
712 713 for i in xrange(*changeid.indices(len(self)))
713 714 if i not in self.changelog.filteredrevs]
714 715 try:
715 716 return context.changectx(self, changeid)
716 717 except error.WdirUnsupported:
717 718 return context.workingctx(self)
718 719
719 720 def __contains__(self, changeid):
720 721 """True if the given changeid exists
721 722
722 723 error.LookupError is raised if an ambiguous node is specified.
723 724 """
724 725 try:
725 726 self[changeid]
726 727 return True
727 728 except error.RepoLookupError:
728 729 return False
729 730
730 731 def __nonzero__(self):
731 732 return True
732 733
733 734 __bool__ = __nonzero__
734 735
735 736 def __len__(self):
736 737 return len(self.changelog)
737 738
738 739 def __iter__(self):
739 740 return iter(self.changelog)
740 741
741 742 def revs(self, expr, *args):
742 743 '''Find revisions matching a revset.
743 744
744 745 The revset is specified as a string ``expr`` that may contain
745 746 %-formatting to escape certain types. See ``revsetlang.formatspec``.
746 747
747 748 Revset aliases from the configuration are not expanded. To expand
748 749 user aliases, consider calling ``scmutil.revrange()`` or
749 750 ``repo.anyrevs([expr], user=True)``.
750 751
751 752 Returns a revset.abstractsmartset, which is a list-like interface
752 753 that contains integer revisions.
753 754 '''
754 755 expr = revsetlang.formatspec(expr, *args)
755 756 m = revset.match(None, expr)
756 757 return m(self)
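# Usage sketch (revisions and names are made up): because ``revs()`` runs
# arguments through revset %-formatting, callers pass raw values instead of
# interpolating them into the expression by hand:
#
#   repo.revs('branch(%s) and not public()', 'default')
#   repo.revs('ancestors(%d)', 42)
#
# Here %s escapes a string and %d an integer revision; see
# ``revsetlang.formatspec`` for the full list of specifiers.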
757 758
758 759 def set(self, expr, *args):
759 760 '''Find revisions matching a revset and emit changectx instances.
760 761
761 762 This is a convenience wrapper around ``revs()`` that iterates the
762 763 result and is a generator of changectx instances.
763 764
764 765 Revset aliases from the configuration are not expanded. To expand
765 766 user aliases, consider calling ``scmutil.revrange()``.
766 767 '''
767 768 for r in self.revs(expr, *args):
768 769 yield self[r]
769 770
770 771 def anyrevs(self, specs, user=False, localalias=None):
771 772 '''Find revisions matching one of the given revsets.
772 773
773 774 Revset aliases from the configuration are not expanded by default. To
774 775 expand user aliases, specify ``user=True``. To provide some local
775 776 definitions overriding user aliases, set ``localalias`` to
776 777 ``{name: definitionstring}``.
777 778 '''
778 779 if user:
779 780 m = revset.matchany(self.ui, specs, repo=self,
780 781 localalias=localalias)
781 782 else:
782 783 m = revset.matchany(None, specs, localalias=localalias)
783 784 return m(self)
784 785
785 786 def url(self):
786 787 return 'file:' + self.root
787 788
788 789 def hook(self, name, throw=False, **args):
789 790 """Call a hook, passing this repo instance.
790 791
791 792 This a convenience method to aid invoking hooks. Extensions likely
792 793 won't call this unless they have registered a custom hook or are
793 794 replacing code that is expected to call a hook.
794 795 """
795 796 return hook.hook(self.ui, self, name, throw, **args)
796 797
797 798 @filteredpropertycache
798 799 def _tagscache(self):
799 800 '''Returns a tagscache object that contains various tag-related
800 801 caches.'''
801 802
802 803 # This simplifies its cache management by having one decorated
803 804 # function (this one) and the rest simply fetch things from it.
804 805 class tagscache(object):
805 806 def __init__(self):
806 807 # These two define the set of tags for this repository. tags
807 808 # maps tag name to node; tagtypes maps tag name to 'global' or
808 809 # 'local'. (Global tags are defined by .hgtags across all
809 810 # heads, and local tags are defined in .hg/localtags.)
810 811 # They constitute the in-memory cache of tags.
811 812 self.tags = self.tagtypes = None
812 813
813 814 self.nodetagscache = self.tagslist = None
814 815
815 816 cache = tagscache()
816 817 cache.tags, cache.tagtypes = self._findtags()
817 818
818 819 return cache
819 820
820 821 def tags(self):
821 822 '''return a mapping of tag to node'''
822 823 t = {}
823 824 if self.changelog.filteredrevs:
824 825 tags, tt = self._findtags()
825 826 else:
826 827 tags = self._tagscache.tags
827 828 for k, v in tags.iteritems():
828 829 try:
829 830 # ignore tags to unknown nodes
830 831 self.changelog.rev(v)
831 832 t[k] = v
832 833 except (error.LookupError, ValueError):
833 834 pass
834 835 return t
835 836
836 837 def _findtags(self):
837 838 '''Do the hard work of finding tags. Return a pair of dicts
838 839 (tags, tagtypes) where tags maps tag name to node, and tagtypes
839 840 maps tag name to a string like \'global\' or \'local\'.
840 841 Subclasses or extensions are free to add their own tags, but
841 842 should be aware that the returned dicts will be retained for the
842 843 duration of the localrepo object.'''
843 844
844 845 # XXX what tagtype should subclasses/extensions use? Currently
845 846 # mq and bookmarks add tags, but do not set the tagtype at all.
846 847 # Should each extension invent its own tag type? Should there
847 848 # be one tagtype for all such "virtual" tags? Or is the status
848 849 # quo fine?
849 850
850 851
851 852 # map tag name to (node, hist)
852 853 alltags = tagsmod.findglobaltags(self.ui, self)
853 854 # map tag name to tag type
854 855 tagtypes = dict((tag, 'global') for tag in alltags)
855 856
856 857 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
857 858
858 859 # Build the return dicts. Have to re-encode tag names because
859 860 # the tags module always uses UTF-8 (in order not to lose info
860 861 # writing to the cache), but the rest of Mercurial wants them in
861 862 # local encoding.
862 863 tags = {}
863 864 for (name, (node, hist)) in alltags.iteritems():
864 865 if node != nullid:
865 866 tags[encoding.tolocal(name)] = node
866 867 tags['tip'] = self.changelog.tip()
867 868 tagtypes = dict([(encoding.tolocal(name), value)
868 869 for (name, value) in tagtypes.iteritems()])
869 870 return (tags, tagtypes)
870 871
871 872 def tagtype(self, tagname):
872 873 '''
873 874 return the type of the given tag. result can be:
874 875
875 876 'local' : a local tag
876 877 'global' : a global tag
877 878 None : tag does not exist
878 879 '''
879 880
880 881 return self._tagscache.tagtypes.get(tagname)
881 882
882 883 def tagslist(self):
883 884 '''return a list of tags ordered by revision'''
884 885 if not self._tagscache.tagslist:
885 886 l = []
886 887 for t, n in self.tags().iteritems():
887 888 l.append((self.changelog.rev(n), t, n))
888 889 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
889 890
890 891 return self._tagscache.tagslist
891 892
892 893 def nodetags(self, node):
893 894 '''return the tags associated with a node'''
894 895 if not self._tagscache.nodetagscache:
895 896 nodetagscache = {}
896 897 for t, n in self._tagscache.tags.iteritems():
897 898 nodetagscache.setdefault(n, []).append(t)
898 899 for tags in nodetagscache.itervalues():
899 900 tags.sort()
900 901 self._tagscache.nodetagscache = nodetagscache
901 902 return self._tagscache.nodetagscache.get(node, [])
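# Illustrative sketch (plain data, hypothetical names): the cache built
# above is simply the tag-to-node mapping inverted into a mapping from
# node to a sorted list of tag names:
#
#   def invert(tags):
#       bynode = {}
#       for tag, node in tags.items():
#           bynode.setdefault(node, []).append(tag)
#       for names in bynode.values():
#           names.sort()
#       return bynode
#
#   invert({'1.0': 'n1', 'stable': 'n1'})  # -> {'n1': ['1.0', 'stable']}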
902 903
903 904 def nodebookmarks(self, node):
904 905 """return the list of bookmarks pointing to the specified node"""
905 906 marks = []
906 907 for bookmark, n in self._bookmarks.iteritems():
907 908 if n == node:
908 909 marks.append(bookmark)
909 910 return sorted(marks)
910 911
911 912 def branchmap(self):
912 913 '''returns a dictionary {branch: [branchheads]} with branchheads
913 914 ordered by increasing revision number'''
914 915 branchmap.updatecache(self)
915 916 return self._branchcaches[self.filtername]
916 917
917 918 @unfilteredmethod
918 919 def revbranchcache(self):
919 920 if not self._revbranchcache:
920 921 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
921 922 return self._revbranchcache
922 923
923 924 def branchtip(self, branch, ignoremissing=False):
924 925 '''return the tip node for a given branch
925 926
926 927 If ignoremissing is True, then this method will not raise an error.
927 928 This is helpful for callers that only expect None for a missing branch
928 929 (e.g. namespace).
929 930
930 931 '''
931 932 try:
932 933 return self.branchmap().branchtip(branch)
933 934 except KeyError:
934 935 if not ignoremissing:
935 936 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
936 937 else:
937 938 pass
938 939
939 940 def lookup(self, key):
940 941 return self[key].node()
941 942
942 943 def lookupbranch(self, key, remote=None):
943 944 repo = remote or self
944 945 if key in repo.branchmap():
945 946 return key
946 947
947 948 repo = (remote and remote.local()) and remote or self
948 949 return repo[key].branch()
949 950
950 951 def known(self, nodes):
951 952 cl = self.changelog
952 953 nm = cl.nodemap
953 954 filtered = cl.filteredrevs
954 955 result = []
955 956 for n in nodes:
956 957 r = nm.get(n)
957 958 resp = not (r is None or r in filtered)
958 959 result.append(resp)
959 960 return result
960 961
961 962 def local(self):
962 963 return self
963 964
964 965 def publishing(self):
965 966 # it's safe (and desirable) to trust the publish flag unconditionally
966 967 # so that we don't finalize changes shared between users via ssh or nfs
967 968 return self.ui.configbool('phases', 'publish', untrusted=True)
968 969
969 970 def cancopy(self):
970 971 # so statichttprepo's override of local() works
971 972 if not self.local():
972 973 return False
973 974 if not self.publishing():
974 975 return True
975 976 # if publishing we can't copy if there is filtered content
976 977 return not self.filtered('visible').changelog.filteredrevs
977 978
978 979 def shared(self):
979 980 '''the type of shared repository (None if not shared)'''
980 981 if self.sharedpath != self.path:
981 982 return 'store'
982 983 return None
983 984
984 985 def wjoin(self, f, *insidef):
985 986 return self.vfs.reljoin(self.root, f, *insidef)
986 987
987 988 def file(self, f):
988 989 if f[0] == '/':
989 990 f = f[1:]
990 991 return filelog.filelog(self.svfs, f)
991 992
992 993 def changectx(self, changeid):
993 994 return self[changeid]
994 995
995 996 def setparents(self, p1, p2=nullid):
996 997 with self.dirstate.parentchange():
997 998 copies = self.dirstate.setparents(p1, p2)
998 999 pctx = self[p1]
999 1000 if copies:
1000 1001 # Adjust copy records, the dirstate cannot do it, it
1001 1002 # requires access to parents manifests. Preserve them
1002 1003 # only for entries added to first parent.
1003 1004 for f in copies:
1004 1005 if f not in pctx and copies[f] in pctx:
1005 1006 self.dirstate.copy(copies[f], f)
1006 1007 if p2 == nullid:
1007 1008 for f, s in sorted(self.dirstate.copies().items()):
1008 1009 if f not in pctx and s not in pctx:
1009 1010 self.dirstate.copy(None, f)
1010 1011
1011 1012 def filectx(self, path, changeid=None, fileid=None):
1012 1013 """changeid can be a changeset revision, node, or tag.
1013 1014 fileid can be a file revision or node."""
1014 1015 return context.filectx(self, path, changeid, fileid)
1015 1016
1016 1017 def getcwd(self):
1017 1018 return self.dirstate.getcwd()
1018 1019
1019 1020 def pathto(self, f, cwd=None):
1020 1021 return self.dirstate.pathto(f, cwd)
1021 1022
1022 1023 def _loadfilter(self, filter):
1023 1024 if filter not in self.filterpats:
1024 1025 l = []
1025 1026 for pat, cmd in self.ui.configitems(filter):
1026 1027 if cmd == '!':
1027 1028 continue
1028 1029 mf = matchmod.match(self.root, '', [pat])
1029 1030 fn = None
1030 1031 params = cmd
1031 1032 for name, filterfn in self._datafilters.iteritems():
1032 1033 if cmd.startswith(name):
1033 1034 fn = filterfn
1034 1035 params = cmd[len(name):].lstrip()
1035 1036 break
1036 1037 if not fn:
1037 1038 fn = lambda s, c, **kwargs: util.filter(s, c)
1038 1039 # Wrap old filters not supporting keyword arguments
1039 1040 if not inspect.getargspec(fn)[2]:
1040 1041 oldfn = fn
1041 1042 fn = lambda s, c, **kwargs: oldfn(s, c)
1042 1043 l.append((mf, fn, params))
1043 1044 self.filterpats[filter] = l
1044 1045 return self.filterpats[filter]
1045 1046
1046 1047 def _filter(self, filterpats, filename, data):
1047 1048 for mf, fn, cmd in filterpats:
1048 1049 if mf(filename):
1049 1050 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1050 1051 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1051 1052 break
1052 1053
1053 1054 return data
1054 1055
1055 1056 @unfilteredpropertycache
1056 1057 def _encodefilterpats(self):
1057 1058 return self._loadfilter('encode')
1058 1059
1059 1060 @unfilteredpropertycache
1060 1061 def _decodefilterpats(self):
1061 1062 return self._loadfilter('decode')
1062 1063
1063 1064 def adddatafilter(self, name, filter):
1064 1065 self._datafilters[name] = filter
1065 1066
1066 1067 def wread(self, filename):
1067 1068 if self.wvfs.islink(filename):
1068 1069 data = self.wvfs.readlink(filename)
1069 1070 else:
1070 1071 data = self.wvfs.read(filename)
1071 1072 return self._filter(self._encodefilterpats, filename, data)
1072 1073
1073 1074 def wwrite(self, filename, data, flags, backgroundclose=False):
1074 1075 """write ``data`` into ``filename`` in the working directory
1075 1076
1076 1077 This returns the length of the written (possibly decoded) data.
1077 1078 """
1078 1079 data = self._filter(self._decodefilterpats, filename, data)
1079 1080 if 'l' in flags:
1080 1081 self.wvfs.symlink(data, filename)
1081 1082 else:
1082 1083 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1083 1084 if 'x' in flags:
1084 1085 self.wvfs.setflags(filename, False, True)
1085 1086 return len(data)
1086 1087
1087 1088 def wwritedata(self, filename, data):
1088 1089 return self._filter(self._decodefilterpats, filename, data)
1089 1090
1090 1091 def currenttransaction(self):
1091 1092 """return the current transaction or None if none exists"""
1092 1093 if self._transref:
1093 1094 tr = self._transref()
1094 1095 else:
1095 1096 tr = None
1096 1097
1097 1098 if tr and tr.running():
1098 1099 return tr
1099 1100 return None
1100 1101
1101 1102 def transaction(self, desc, report=None):
1102 1103 if (self.ui.configbool('devel', 'all-warnings')
1103 1104 or self.ui.configbool('devel', 'check-locks')):
1104 1105 if self._currentlock(self._lockref) is None:
1105 1106 raise error.ProgrammingError('transaction requires locking')
1106 1107 tr = self.currenttransaction()
1107 1108 if tr is not None:
1108 1109 scmutil.registersummarycallback(self, tr, desc)
1109 1110 return tr.nest()
1110 1111
1111 1112 # abort here if the journal already exists
1112 1113 if self.svfs.exists("journal"):
1113 1114 raise error.RepoError(
1114 1115 _("abandoned transaction found"),
1115 1116 hint=_("run 'hg recover' to clean up transaction"))
1116 1117
1117 1118 idbase = "%.40f#%f" % (random.random(), time.time())
1118 1119 ha = hex(hashlib.sha1(idbase).digest())
1119 1120 txnid = 'TXN:' + ha
1120 1121 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1121 1122
1122 1123 self._writejournal(desc)
1123 1124 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1124 1125 if report:
1125 1126 rp = report
1126 1127 else:
1127 1128 rp = self.ui.warn
1128 1129 vfsmap = {'plain': self.vfs} # root of .hg/
1129 1130 # we must avoid a cyclic reference between repo and transaction.
1130 1131 reporef = weakref.ref(self)
1131 1132 # Code to track tag movement
1132 1133 #
1133 1134 # Since tags are all handled as file content, it is actually quite hard
1134 1135 # to track these movement from a code perspective. So we fallback to a
1135 1136 # tracking at the repository level. One could envision to track changes
1136 1137 # to the '.hgtags' file through changegroup apply but that fails to
1137 1138 # cope with case where transaction expose new heads without changegroup
1138 1139 # being involved (eg: phase movement).
1139 1140 #
1140 1141 # For now, We gate the feature behind a flag since this likely comes
1141 1142 # with performance impacts. The current code run more often than needed
1142 1143 # and do not use caches as much as it could. The current focus is on
1143 1144 # the behavior of the feature so we disable it by default. The flag
1144 1145 # will be removed when we are happy with the performance impact.
1145 1146 #
1146 1147 # Once this feature is no longer experimental move the following
1147 1148 # documentation to the appropriate help section:
1148 1149 #
1149 1150 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1150 1151 # tags (new or changed or deleted tags). In addition the details of
1151 1152 # these changes are made available in a file at:
1152 1153 # ``REPOROOT/.hg/changes/tags.changes``.
1153 1154 # Make sure you check for HG_TAG_MOVED before reading that file, as it
1154 1155 # might exist from a previous transaction even if no tags were touched
1155 1156 # in this one. Changes are recorded in a line-based format::
1156 1157 #
1157 1158 # <action> <hex-node> <tag-name>\n
1158 1159 #
1159 1160 # Actions are defined as follows:
1160 1161 # "-R": tag is removed,
1161 1162 # "+A": tag is added,
1162 1163 # "-M": tag is moved (old value),
1163 1164 # "+M": tag is moved (new value),
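#
# For example, a ``tags.changes`` file where tag "v1" moved and tag "old"
# was removed could contain (node hashes are made up here; real entries
# use the full 40-character hex form):
#
#   -R 1111111111111111111111111111111111111111 old
#   -M 2222222222222222222222222222222222222222 v1
#   +M 3333333333333333333333333333333333333333 v1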
1164 1165 tracktags = lambda x: None
1165 1166 # experimental config: experimental.hook-track-tags
1166 1167 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1167 1168 if desc != 'strip' and shouldtracktags:
1168 1169 oldheads = self.changelog.headrevs()
1169 1170 def tracktags(tr2):
1170 1171 repo = reporef()
1171 1172 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1172 1173 newheads = repo.changelog.headrevs()
1173 1174 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1174 1175 # note: we compare lists here;
1175 1176 # as we do it only once, building a set would not be cheaper
1176 1177 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1177 1178 if changes:
1178 1179 tr2.hookargs['tag_moved'] = '1'
1179 1180 with repo.vfs('changes/tags.changes', 'w',
1180 1181 atomictemp=True) as changesfile:
1181 1182 # note: we do not register the file with the transaction
1182 1183 # because we need it to still exist when the transaction
1183 1184 # is closed (for txnclose hooks)
1184 1185 tagsmod.writediff(changesfile, changes)
1185 1186 def validate(tr2):
1186 1187 """will run pre-closing hooks"""
1187 1188 # XXX the transaction API is a bit lacking here so we take a hacky
1188 1189 # path for now
1189 1190 #
1190 1191 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1191 1192 # dict is copied before these run. In addition we need the data
1192 1193 # available to in-memory hooks too.
1193 1194 #
1194 1195 # Moreover, we also need to make sure this runs before txnclose
1195 1196 # hooks and there is no "pending" mechanism that would execute
1196 1197 # logic only if hooks are about to run.
1197 1198 #
1198 1199 # Fixing this limitation of the transaction is also needed to track
1199 1200 # other families of changes (bookmarks, phases, obsolescence).
1200 1201 #
1201 1202 # This will have to be fixed before we remove the experimental
1202 1203 # gating.
1203 1204 tracktags(tr2)
1204 1205 reporef().hook('pretxnclose', throw=True,
1205 1206 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1206 1207 def releasefn(tr, success):
1207 1208 repo = reporef()
1208 1209 if success:
1209 1210 # this should be explicitly invoked here, because
1210 1211 # in-memory changes aren't written out when closing
1211 1212 # the transaction, if tr.addfilegenerator (via
1212 1213 # dirstate.write or so) isn't invoked while the
1213 1214 # transaction is running
1214 1215 repo.dirstate.write(None)
1215 1216 else:
1216 1217 # discard all changes (including ones already written
1217 1218 # out) in this transaction
1218 1219 repo.dirstate.restorebackup(None, 'journal.dirstate')
1219 1220
1220 1221 repo.invalidate(clearfilecache=True)
1221 1222
1222 1223 tr = transaction.transaction(rp, self.svfs, vfsmap,
1223 1224 "journal",
1224 1225 "undo",
1225 1226 aftertrans(renames),
1226 1227 self.store.createmode,
1227 1228 validator=validate,
1228 1229 releasefn=releasefn,
1229 1230 checkambigfiles=_cachedfiles)
1230 1231 tr.changes['revs'] = set()
1231 1232 tr.changes['obsmarkers'] = set()
1232 1233 tr.changes['phases'] = {}
1233 1234 tr.changes['bookmarks'] = {}
1234 1235
1235 1236 tr.hookargs['txnid'] = txnid
1236 1237 # note: writing the fncache only during finalize means that the file is
1237 1238 # outdated when running hooks. As fncache is used for streaming clones,
1238 1239 # this is not expected to break anything that happens during the hooks.
1239 1240 tr.addfinalize('flush-fncache', self.store.write)
1240 1241 def txnclosehook(tr2):
1241 1242 """To be run if transaction is successful, will schedule a hook run
1242 1243 """
1243 1244 # Don't reference tr2 in hook() so we don't hold a reference.
1244 1245 # This reduces memory consumption when there are multiple
1245 1246 # transactions per lock. This can likely go away if issue5045
1246 1247 # fixes the function accumulation.
1247 1248 hookargs = tr2.hookargs
1248 1249
1249 1250 def hook():
1250 1251 reporef().hook('txnclose', throw=False, txnname=desc,
1251 1252 **pycompat.strkwargs(hookargs))
1252 1253 reporef()._afterlock(hook)
1253 1254 tr.addfinalize('txnclose-hook', txnclosehook)
1254 1255 tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
1255 1256 def txnaborthook(tr2):
1256 1257 """To be run if transaction is aborted
1257 1258 """
1258 1259 reporef().hook('txnabort', throw=False, txnname=desc,
1259 1260 **tr2.hookargs)
1260 1261 tr.addabort('txnabort-hook', txnaborthook)
1261 1262 # avoid eager cache invalidation. in-memory data should be identical
1262 1263 # to stored data if transaction has no error.
1263 1264 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1264 1265 self._transref = weakref.ref(tr)
1265 1266 scmutil.registersummarycallback(self, tr, desc)
1266 1267 return tr
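# Usage sketch (error handling elided; assumes a repo object inside
# Mercurial): callers hold the store lock, open a transaction, and either
# close it or release it to roll back:
#
#   with repo.lock():
#       tr = repo.transaction('my-operation')
#       try:
#           # ... write store data through repo.svfs ...
#           tr.close()    # commit the transaction
#       finally:
#           tr.release()  # no-op if closed, rollback otherwise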
1267 1268
1268 1269 def _journalfiles(self):
1269 1270 return ((self.svfs, 'journal'),
1270 1271 (self.vfs, 'journal.dirstate'),
1271 1272 (self.vfs, 'journal.branch'),
1272 1273 (self.vfs, 'journal.desc'),
1273 1274 (self.vfs, 'journal.bookmarks'),
1274 1275 (self.svfs, 'journal.phaseroots'))
1275 1276
1276 1277 def undofiles(self):
1277 1278 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1278 1279
1279 1280 @unfilteredmethod
1280 1281 def _writejournal(self, desc):
1281 1282 self.dirstate.savebackup(None, 'journal.dirstate')
1282 1283 self.vfs.write("journal.branch",
1283 1284 encoding.fromlocal(self.dirstate.branch()))
1284 1285 self.vfs.write("journal.desc",
1285 1286 "%d\n%s\n" % (len(self), desc))
1286 1287 self.vfs.write("journal.bookmarks",
1287 1288 self.vfs.tryread("bookmarks"))
1288 1289 self.svfs.write("journal.phaseroots",
1289 1290 self.svfs.tryread("phaseroots"))
1290 1291
1291 1292 def recover(self):
1292 1293 with self.lock():
1293 1294 if self.svfs.exists("journal"):
1294 1295 self.ui.status(_("rolling back interrupted transaction\n"))
1295 1296 vfsmap = {'': self.svfs,
1296 1297 'plain': self.vfs,}
1297 1298 transaction.rollback(self.svfs, vfsmap, "journal",
1298 1299 self.ui.warn,
1299 1300 checkambigfiles=_cachedfiles)
1300 1301 self.invalidate()
1301 1302 return True
1302 1303 else:
1303 1304 self.ui.warn(_("no interrupted transaction available\n"))
1304 1305 return False
1305 1306
1306 1307 def rollback(self, dryrun=False, force=False):
1307 1308 wlock = lock = dsguard = None
1308 1309 try:
1309 1310 wlock = self.wlock()
1310 1311 lock = self.lock()
1311 1312 if self.svfs.exists("undo"):
1312 1313 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1313 1314
1314 1315 return self._rollback(dryrun, force, dsguard)
1315 1316 else:
1316 1317 self.ui.warn(_("no rollback information available\n"))
1317 1318 return 1
1318 1319 finally:
1319 1320 release(dsguard, lock, wlock)
1320 1321
1321 1322 @unfilteredmethod # Until we get smarter cache management
1322 1323 def _rollback(self, dryrun, force, dsguard):
1323 1324 ui = self.ui
1324 1325 try:
1325 1326 args = self.vfs.read('undo.desc').splitlines()
1326 1327 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1327 1328 if len(args) >= 3:
1328 1329 detail = args[2]
1329 1330 oldtip = oldlen - 1
1330 1331
1331 1332 if detail and ui.verbose:
1332 1333 msg = (_('repository tip rolled back to revision %d'
1333 1334 ' (undo %s: %s)\n')
1334 1335 % (oldtip, desc, detail))
1335 1336 else:
1336 1337 msg = (_('repository tip rolled back to revision %d'
1337 1338 ' (undo %s)\n')
1338 1339 % (oldtip, desc))
1339 1340 except IOError:
1340 1341 msg = _('rolling back unknown transaction\n')
1341 1342 desc = None
1342 1343
1343 1344 if not force and self['.'] != self['tip'] and desc == 'commit':
1344 1345 raise error.Abort(
1345 1346 _('rollback of last commit while not checked out '
1346 1347 'may lose data'), hint=_('use -f to force'))
1347 1348
1348 1349 ui.status(msg)
1349 1350 if dryrun:
1350 1351 return 0
1351 1352
1352 1353 parents = self.dirstate.parents()
1353 1354 self.destroying()
1354 1355 vfsmap = {'plain': self.vfs, '': self.svfs}
1355 1356 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1356 1357 checkambigfiles=_cachedfiles)
1357 1358 if self.vfs.exists('undo.bookmarks'):
1358 1359 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1359 1360 if self.svfs.exists('undo.phaseroots'):
1360 1361 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1361 1362 self.invalidate()
1362 1363
1363 1364 parentgone = (parents[0] not in self.changelog.nodemap or
1364 1365 parents[1] not in self.changelog.nodemap)
1365 1366 if parentgone:
1366 1367 # prevent dirstateguard from overwriting already restored one
1367 1368 dsguard.close()
1368 1369
1369 1370 self.dirstate.restorebackup(None, 'undo.dirstate')
1370 1371 try:
1371 1372 branch = self.vfs.read('undo.branch')
1372 1373 self.dirstate.setbranch(encoding.tolocal(branch))
1373 1374 except IOError:
1374 1375 ui.warn(_('named branch could not be reset: '
1375 1376 'current branch is still \'%s\'\n')
1376 1377 % self.dirstate.branch())
1377 1378
1378 1379 parents = tuple([p.rev() for p in self[None].parents()])
1379 1380 if len(parents) > 1:
1380 1381 ui.status(_('working directory now based on '
1381 1382 'revisions %d and %d\n') % parents)
1382 1383 else:
1383 1384 ui.status(_('working directory now based on '
1384 1385 'revision %d\n') % parents)
1385 1386 mergemod.mergestate.clean(self, self['.'].node())
1386 1387
1387 1388 # TODO: if we know which new heads may result from this rollback, pass
1388 1389 # them to destroy(), which will prevent the branchhead cache from being
1389 1390 # invalidated.
1390 1391 self.destroyed()
1391 1392 return 0
1392 1393
1393 1394 def _buildcacheupdater(self, newtransaction):
1394 1395 """called during transaction to build the callback updating cache
1395 1396
1396 1397 Lives on the repository to help extensions that might want to augment
1397 1398 this logic. For this purpose, the created transaction is passed to the
1398 1399 method.
1399 1400 """
1400 1401 # we must avoid a cyclic reference between repo and transaction.
1401 1402 reporef = weakref.ref(self)
1402 1403 def updater(tr):
1403 1404 repo = reporef()
1404 1405 repo.updatecaches(tr)
1405 1406 return updater
1406 1407
1407 1408 @unfilteredmethod
1408 1409 def updatecaches(self, tr=None):
1409 1410 """warm appropriate caches
1410 1411
1411 1412 If this function is called after a transaction has closed, the transaction
1412 1413 will be available in the 'tr' argument. This can be used to selectively
1413 1414 update caches relevant to the changes in that transaction.
1414 1415 """
1415 1416 if tr is not None and tr.hookargs.get('source') == 'strip':
1416 1417 # During strip, many caches are invalid but
1417 1418 # a later call to `destroyed` will refresh them.
1418 1419 return
1419 1420
1420 1421 if tr is None or tr.changes['revs']:
1421 1422 # updating the unfiltered branchmap should refresh all the others.
1422 1423 self.ui.debug('updating the branch cache\n')
1423 1424 branchmap.updatecache(self.filtered('served'))
1424 1425
1425 1426 def invalidatecaches(self):
1426 1427
1427 1428 if '_tagscache' in vars(self):
1428 1429 # can't use delattr on proxy
1429 1430 del self.__dict__['_tagscache']
1430 1431
1431 1432 self.unfiltered()._branchcaches.clear()
1432 1433 self.invalidatevolatilesets()
1433 1434 self._sparsesignaturecache.clear()
1434 1435
1435 1436 def invalidatevolatilesets(self):
1436 1437 self.filteredrevcache.clear()
1437 1438 obsolete.clearobscaches(self)
1438 1439
1439 1440 def invalidatedirstate(self):
1440 1441 '''Invalidates the dirstate, causing the next call to dirstate
1441 1442 to check whether it was modified since the last time it was read,
1442 1443 rereading it if it has been.
1443 1444
1444 1445 This is different from dirstate.invalidate() in that it doesn't
1445 1446 always reread the dirstate. Use dirstate.invalidate() if you want to
1446 1447 explicitly read the dirstate again (i.e. restoring it to a previously
1447 1448 known good state).'''
1448 1449 if hasunfilteredcache(self, 'dirstate'):
1449 1450 for k in self.dirstate._filecache:
1450 1451 try:
1451 1452 delattr(self.dirstate, k)
1452 1453 except AttributeError:
1453 1454 pass
1454 1455 delattr(self.unfiltered(), 'dirstate')
1455 1456
1456 1457 def invalidate(self, clearfilecache=False):
1457 1458 '''Invalidates both store and non-store parts other than dirstate
1458 1459
1459 1460 If a transaction is running, invalidation of store is omitted,
1460 1461 because discarding in-memory changes might cause inconsistency
1461 1462 (e.g. an incomplete fncache causes unintentional failures, but
1462 1463 a redundant one doesn't).
1463 1464 '''
1464 1465 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1465 1466 for k in list(self._filecache.keys()):
1466 1467 # dirstate is invalidated separately in invalidatedirstate()
1467 1468 if k == 'dirstate':
1468 1469 continue
1469 1470
1470 1471 if clearfilecache:
1471 1472 del self._filecache[k]
1472 1473 try:
1473 1474 delattr(unfiltered, k)
1474 1475 except AttributeError:
1475 1476 pass
1476 1477 self.invalidatecaches()
1477 1478 if not self.currenttransaction():
1478 1479 # TODO: Changing contents of store outside transaction
1479 1480 # causes inconsistency. We should make in-memory store
1480 1481 # changes detectable, and abort if changed.
1481 1482 self.store.invalidatecaches()
1482 1483
1483 1484 def invalidateall(self):
1484 1485 '''Fully invalidates both store and non-store parts, causing the
1485 1486 subsequent operation to reread any outside changes.'''
1486 1487 # extension should hook this to invalidate its caches
1487 1488 self.invalidate()
1488 1489 self.invalidatedirstate()
1489 1490
1490 1491 @unfilteredmethod
1491 1492 def _refreshfilecachestats(self, tr):
1492 1493 """Reload stats of cached files so that they are flagged as valid"""
1493 1494 for k, ce in self._filecache.items():
1494 1495 if k == 'dirstate' or k not in self.__dict__:
1495 1496 continue
1496 1497 ce.refresh()
1497 1498
1498 1499 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1499 1500 inheritchecker=None, parentenvvar=None):
1500 1501 parentlock = None
1501 1502 # the contents of parentenvvar are used by the underlying lock to
1502 1503 # determine whether it can be inherited
1503 1504 if parentenvvar is not None:
1504 1505 parentlock = encoding.environ.get(parentenvvar)
1505 1506 try:
1506 1507 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1507 1508 acquirefn=acquirefn, desc=desc,
1508 1509 inheritchecker=inheritchecker,
1509 1510 parentlock=parentlock)
1510 1511 except error.LockHeld as inst:
1511 1512 if not wait:
1512 1513 raise
1513 1514 # show more details for new-style locks
1514 1515 if ':' in inst.locker:
1515 1516 host, pid = inst.locker.split(":", 1)
1516 1517 self.ui.warn(
1517 1518 _("waiting for lock on %s held by process %r "
1518 1519 "on host %r\n") % (desc, pid, host))
1519 1520 else:
1520 1521 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1521 1522 (desc, inst.locker))
1522 1523 # default to 600 seconds timeout
1523 1524 l = lockmod.lock(vfs, lockname,
1524 1525 int(self.ui.config("ui", "timeout")),
1525 1526 releasefn=releasefn, acquirefn=acquirefn,
1526 1527 desc=desc)
1527 1528 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1528 1529 return l
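# Illustrative sketch (hypothetical names): the same try-then-wait shape as
# above, without the Mercurial specifics -- attempt a non-blocking
# acquisition first, and on contention fall back to a bounded blocking one:
#
#   def acquire(makelock, warn, timeout=600):
#       try:
#           return makelock(wait=0)        # fail fast if already held
#       except LockHeldError as inst:      # hypothetical exception type
#           warn('waiting for lock held by %r' % inst.locker)
#           return makelock(wait=timeout)  # block, but not forever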
1529 1530
1530 1531 def _afterlock(self, callback):
1531 1532 """add a callback to be run when the repository is fully unlocked
1532 1533
1533 1534 The callback will be executed when the outermost lock is released
1534 1535 (with wlock being higher level than 'lock')."""
1535 1536 for ref in (self._wlockref, self._lockref):
1536 1537 l = ref and ref()
1537 1538 if l and l.held:
1538 1539 l.postrelease.append(callback)
1539 1540 break
1540 1541 else: # no lock has been found.
1541 1542 callback()
1542 1543
1543 1544 def lock(self, wait=True):
1544 1545 '''Lock the repository store (.hg/store) and return a weak reference
1545 1546 to the lock. Use this before modifying the store (e.g. committing or
1546 1547 stripping). If you are opening a transaction, get a lock as well.
1547 1548
1548 1549 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1549 1550 'wlock' first to avoid a dead-lock hazard.'''
1550 1551 l = self._currentlock(self._lockref)
1551 1552 if l is not None:
1552 1553 l.lock()
1553 1554 return l
1554 1555
1555 1556 l = self._lock(self.svfs, "lock", wait, None,
1556 1557 self.invalidate, _('repository %s') % self.origroot)
1557 1558 self._lockref = weakref.ref(l)
1558 1559 return l
1559 1560
1560 1561 def _wlockchecktransaction(self):
1561 1562 if self.currenttransaction() is not None:
1562 1563 raise error.LockInheritanceContractViolation(
1563 1564 'wlock cannot be inherited in the middle of a transaction')
1564 1565
1565 1566 def wlock(self, wait=True):
1566 1567 '''Lock the non-store parts of the repository (everything under
1567 1568 .hg except .hg/store) and return a weak reference to the lock.
1568 1569
1569 1570 Use this before modifying files in .hg.
1570 1571
1571 1572 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1572 1573 'wlock' first to avoid a dead-lock hazard.'''
1573 1574 l = self._wlockref and self._wlockref()
1574 1575 if l is not None and l.held:
1575 1576 l.lock()
1576 1577 return l
1577 1578
1578 1579 # We do not need to check for non-waiting lock acquisition. Such
1579 1580 # acquisition would not cause a dead-lock as it would just fail.
1580 1581 if wait and (self.ui.configbool('devel', 'all-warnings')
1581 1582 or self.ui.configbool('devel', 'check-locks')):
1582 1583 if self._currentlock(self._lockref) is not None:
1583 1584 self.ui.develwarn('"wlock" acquired after "lock"')
1584 1585
1585 1586 def unlock():
1586 1587 if self.dirstate.pendingparentchange():
1587 1588 self.dirstate.invalidate()
1588 1589 else:
1589 1590 self.dirstate.write(None)
1590 1591
1591 1592 self._filecache['dirstate'].refresh()
1592 1593
1593 1594 l = self._lock(self.vfs, "wlock", wait, unlock,
1594 1595 self.invalidatedirstate, _('working directory of %s') %
1595 1596 self.origroot,
1596 1597 inheritchecker=self._wlockchecktransaction,
1597 1598 parentenvvar='HG_WLOCK_LOCKER')
1598 1599 self._wlockref = weakref.ref(l)
1599 1600 return l
1600 1601
1601 1602 def _currentlock(self, lockref):
1602 1603 """Returns the lock if it's held, or None if it's not."""
1603 1604 if lockref is None:
1604 1605 return None
1605 1606 l = lockref()
1606 1607 if l is None or not l.held:
1607 1608 return None
1608 1609 return l
1609 1610
1610 1611 def currentwlock(self):
1611 1612 """Returns the wlock if it's held, or None if it's not."""
1612 1613 return self._currentlock(self._wlockref)
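# Usage sketch: when both locks are needed, take 'wlock' first, mirroring
# the ordering rule stated in the docstrings above:
#
#   with repo.wlock():
#       with repo.lock():
#           # ... modify both the working copy and the store ...
#           pass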
1613 1614
1614 1615 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1615 1616 """
1616 1617 commit an individual file as part of a larger transaction
1617 1618 """
1618 1619
1619 1620 fname = fctx.path()
1620 1621 fparent1 = manifest1.get(fname, nullid)
1621 1622 fparent2 = manifest2.get(fname, nullid)
1622 1623 if isinstance(fctx, context.filectx):
1623 1624 node = fctx.filenode()
1624 1625 if node in [fparent1, fparent2]:
1625 1626 self.ui.debug('reusing %s filelog entry\n' % fname)
1626 1627 if manifest1.flags(fname) != fctx.flags():
1627 1628 changelist.append(fname)
1628 1629 return node
1629 1630
1630 1631 flog = self.file(fname)
1631 1632 meta = {}
1632 1633 copy = fctx.renamed()
1633 1634 if copy and copy[0] != fname:
1634 1635 # Mark the new revision of this file as a copy of another
1635 1636 # file. This copy data will effectively act as a parent
1636 1637 # of this new revision. If this is a merge, the first
1637 1638 # parent will be the nullid (meaning "look up the copy data")
1638 1639 # and the second one will be the other parent. For example:
1639 1640 #
1640 1641 # 0 --- 1 --- 3 rev1 changes file foo
1641 1642 # \ / rev2 renames foo to bar and changes it
1642 1643 # \- 2 -/ rev3 should have bar with all changes and
1643 1644 # should record that bar descends from
1644 1645 # bar in rev2 and foo in rev1
1645 1646 #
1646 1647 # this allows this merge to succeed:
1647 1648 #
1648 1649 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1649 1650 # \ / merging rev3 and rev4 should use bar@rev2
1650 1651 # \- 2 --- 4 as the merge base
1651 1652 #
1652 1653
1653 1654 cfname = copy[0]
1654 1655 crev = manifest1.get(cfname)
1655 1656 newfparent = fparent2
1656 1657
1657 1658 if manifest2: # branch merge
1658 1659 if fparent2 == nullid or crev is None: # copied on remote side
1659 1660 if cfname in manifest2:
1660 1661 crev = manifest2[cfname]
1661 1662 newfparent = fparent1
1662 1663
1663 1664 # Here, we used to search backwards through history to try to find
1664 1665 # where the file copy came from if the source of a copy was not in
1665 1666 # the parent directory. However, this doesn't actually make sense to
1666 1667 # do (what does a copy from something not in your working copy even
1667 1668 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1668 1669 # the user that copy information was dropped, so if they didn't
1669 1670 # expect this outcome it can be fixed, but this is the correct
1670 1671 # behavior in this circumstance.
1671 1672
1672 1673 if crev:
1673 1674 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1674 1675 meta["copy"] = cfname
1675 1676 meta["copyrev"] = hex(crev)
1676 1677 fparent1, fparent2 = nullid, newfparent
1677 1678 else:
1678 1679 self.ui.warn(_("warning: can't find ancestor for '%s' "
1679 1680 "copied from '%s'!\n") % (fname, cfname))
1680 1681
1681 1682 elif fparent1 == nullid:
1682 1683 fparent1, fparent2 = fparent2, nullid
1683 1684 elif fparent2 != nullid:
1684 1685 # is one parent an ancestor of the other?
1685 1686 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1686 1687 if fparent1 in fparentancestors:
1687 1688 fparent1, fparent2 = fparent2, nullid
1688 1689 elif fparent2 in fparentancestors:
1689 1690 fparent2 = nullid
1690 1691
1691 1692 # is the file changed?
1692 1693 text = fctx.data()
1693 1694 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1694 1695 changelist.append(fname)
1695 1696 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1696 1697 # are just the flags changed during merge?
1697 1698 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1698 1699 changelist.append(fname)
1699 1700
1700 1701 return fparent1
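# Illustrative example (values are made up): when a rename of 'foo' to
# 'bar' is committed, the filelog entry for 'bar' carries the copy source
# in its metadata and uses nullid as its first parent, as in the branch
# above:
#
#   meta = {'copy': 'foo', 'copyrev': hex(crev)}  # crev: 'foo' in manifest1
#   fparent1, fparent2 = nullid, newfparent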
1701 1702
1702 1703 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1703 1704 """check for commit arguments that aren't committable"""
1704 1705 if match.isexact() or match.prefix():
1705 1706 matched = set(status.modified + status.added + status.removed)
1706 1707
1707 1708 for f in match.files():
1708 1709 f = self.dirstate.normalize(f)
1709 1710 if f == '.' or f in matched or f in wctx.substate:
1710 1711 continue
1711 1712 if f in status.deleted:
1712 1713 fail(f, _('file not found!'))
1713 1714 if f in vdirs: # visited directory
1714 1715 d = f + '/'
1715 1716 for mf in matched:
1716 1717 if mf.startswith(d):
1717 1718 break
1718 1719 else:
1719 1720 fail(f, _("no match under directory!"))
1720 1721 elif f not in self.dirstate:
1721 1722 fail(f, _("file not tracked!"))
1722 1723
1723 1724 @unfilteredmethod
1724 1725 def commit(self, text="", user=None, date=None, match=None, force=False,
1725 1726 editor=False, extra=None):
1726 1727 """Add a new revision to current repository.
1727 1728
1728 1729 Revision information is gathered from the working directory,
1729 1730 match can be used to filter the committed files. If editor is
1730 1731 supplied, it is called to get a commit message.
1731 1732 """
1732 1733 if extra is None:
1733 1734 extra = {}
1734 1735
1735 1736 def fail(f, msg):
1736 1737 raise error.Abort('%s: %s' % (f, msg))
1737 1738
1738 1739 if not match:
1739 1740 match = matchmod.always(self.root, '')
1740 1741
1741 1742 if not force:
1742 1743 vdirs = []
1743 1744 match.explicitdir = vdirs.append
1744 1745 match.bad = fail
1745 1746
1746 1747 wlock = lock = tr = None
1747 1748 try:
1748 1749 wlock = self.wlock()
1749 1750 lock = self.lock() # for recent changelog (see issue4368)
1750 1751
1751 1752 wctx = self[None]
1752 1753 merge = len(wctx.parents()) > 1
1753 1754
1754 1755 if not force and merge and not match.always():
1755 1756 raise error.Abort(_('cannot partially commit a merge '
1756 1757 '(do not specify files or patterns)'))
1757 1758
1758 1759 status = self.status(match=match, clean=force)
1759 1760 if force:
1760 1761 status.modified.extend(status.clean) # mq may commit clean files
1761 1762
1762 1763 # check subrepos
1763 1764 subs = []
1764 1765 commitsubs = set()
1765 1766 newstate = wctx.substate.copy()
1766 1767 # only manage subrepos and .hgsubstate if .hgsub is present
1767 1768 if '.hgsub' in wctx:
1768 1769 # we'll decide whether to track this ourselves, thanks
1769 1770 for c in status.modified, status.added, status.removed:
1770 1771 if '.hgsubstate' in c:
1771 1772 c.remove('.hgsubstate')
1772 1773
1773 1774 # compare current state to last committed state
1774 1775 # build new substate based on last committed state
1775 1776 oldstate = wctx.p1().substate
1776 1777 for s in sorted(newstate.keys()):
1777 1778 if not match(s):
1778 1779 # ignore working copy, use old state if present
1779 1780 if s in oldstate:
1780 1781 newstate[s] = oldstate[s]
1781 1782 continue
1782 1783 if not force:
1783 1784 raise error.Abort(
1784 1785 _("commit with new subrepo %s excluded") % s)
1785 1786 dirtyreason = wctx.sub(s).dirtyreason(True)
1786 1787 if dirtyreason:
1787 1788 if not self.ui.configbool('ui', 'commitsubrepos'):
1788 1789 raise error.Abort(dirtyreason,
1789 1790 hint=_("use --subrepos for recursive commit"))
1790 1791 subs.append(s)
1791 1792 commitsubs.add(s)
1792 1793 else:
1793 1794 bs = wctx.sub(s).basestate()
1794 1795 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1795 1796 if oldstate.get(s, (None, None, None))[1] != bs:
1796 1797 subs.append(s)
1797 1798
1798 1799 # check for removed subrepos
1799 1800 for p in wctx.parents():
1800 1801 r = [s for s in p.substate if s not in newstate]
1801 1802 subs += [s for s in r if match(s)]
1802 1803 if subs:
1803 1804 if (not match('.hgsub') and
1804 1805 '.hgsub' in (wctx.modified() + wctx.added())):
1805 1806 raise error.Abort(
1806 1807 _("can't commit subrepos without .hgsub"))
1807 1808 status.modified.insert(0, '.hgsubstate')
1808 1809
1809 1810 elif '.hgsub' in status.removed:
1810 1811 # clean up .hgsubstate when .hgsub is removed
1811 1812 if ('.hgsubstate' in wctx and
1812 1813 '.hgsubstate' not in (status.modified + status.added +
1813 1814 status.removed)):
1814 1815 status.removed.insert(0, '.hgsubstate')
1815 1816
1816 1817 # make sure all explicit patterns are matched
1817 1818 if not force:
1818 1819 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1819 1820
1820 1821 cctx = context.workingcommitctx(self, status,
1821 1822 text, user, date, extra)
1822 1823
1823 1824 # internal config: ui.allowemptycommit
1824 1825 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1825 1826 or extra.get('close') or merge or cctx.files()
1826 1827 or self.ui.configbool('ui', 'allowemptycommit'))
1827 1828 if not allowemptycommit:
1828 1829 return None
1829 1830
1830 1831 if merge and cctx.deleted():
1831 1832 raise error.Abort(_("cannot commit merge with missing files"))
1832 1833
1833 1834 ms = mergemod.mergestate.read(self)
1834 1835 mergeutil.checkunresolved(ms)
1835 1836
1836 1837 if editor:
1837 1838 cctx._text = editor(self, cctx, subs)
1838 1839 edited = (text != cctx._text)
1839 1840
1840 1841 # Save commit message in case this transaction gets rolled back
1841 1842 # (e.g. by a pretxncommit hook). Leave the content alone on
1842 1843 # the assumption that the user will use the same editor again.
1843 1844 msgfn = self.savecommitmessage(cctx._text)
1844 1845
1845 1846 # commit subs and write new state
1846 1847 if subs:
1847 1848 for s in sorted(commitsubs):
1848 1849 sub = wctx.sub(s)
1849 1850 self.ui.status(_('committing subrepository %s\n') %
1850 1851 subrepo.subrelpath(sub))
1851 1852 sr = sub.commit(cctx._text, user, date)
1852 1853 newstate[s] = (newstate[s][0], sr)
1853 1854 subrepo.writestate(self, newstate)
1854 1855
1855 1856 p1, p2 = self.dirstate.parents()
1856 1857 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1857 1858 try:
1858 1859 self.hook("precommit", throw=True, parent1=hookp1,
1859 1860 parent2=hookp2)
1860 1861 tr = self.transaction('commit')
1861 1862 ret = self.commitctx(cctx, True)
1862 1863 except: # re-raises
1863 1864 if edited:
1864 1865 self.ui.write(
1865 1866 _('note: commit message saved in %s\n') % msgfn)
1866 1867 raise
1867 1868 # update bookmarks, dirstate and mergestate
1868 1869 bookmarks.update(self, [p1, p2], ret)
1869 1870 cctx.markcommitted(ret)
1870 1871 ms.reset()
1871 1872 tr.close()
1872 1873
1873 1874 finally:
1874 1875 lockmod.release(tr, lock, wlock)
1875 1876
1876 1877 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1877 1878 # hack for commands that use a temporary commit (e.g. histedit):
1878 1879 # the temporary commit may have been stripped before the hook runs
1879 1880 if self.changelog.hasnode(ret):
1880 1881 self.hook("commit", node=node, parent1=parent1,
1881 1882 parent2=parent2)
1882 1883 self._afterlock(commithook)
1883 1884 return ret
1884 1885
1885 1886 @unfilteredmethod
1886 1887 def commitctx(self, ctx, error=False):
1887 1888 """Add a new revision to current repository.
1888 1889 Revision information is passed via the context argument.
1889 1890 """
1890 1891
1891 1892 tr = None
1892 1893 p1, p2 = ctx.p1(), ctx.p2()
1893 1894 user = ctx.user()
1894 1895
1895 1896 lock = self.lock()
1896 1897 try:
1897 1898 tr = self.transaction("commit")
1898 1899 trp = weakref.proxy(tr)
1899 1900
1900 1901 if ctx.manifestnode():
1901 1902 # reuse an existing manifest revision
1902 1903 mn = ctx.manifestnode()
1903 1904 files = ctx.files()
1904 1905 elif ctx.files():
1905 1906 m1ctx = p1.manifestctx()
1906 1907 m2ctx = p2.manifestctx()
1907 1908 mctx = m1ctx.copy()
1908 1909
1909 1910 m = mctx.read()
1910 1911 m1 = m1ctx.read()
1911 1912 m2 = m2ctx.read()
1912 1913
1913 1914 # check in files
1914 1915 added = []
1915 1916 changed = []
1916 1917 removed = list(ctx.removed())
1917 1918 linkrev = len(self)
1918 1919 self.ui.note(_("committing files:\n"))
1919 1920 for f in sorted(ctx.modified() + ctx.added()):
1920 1921 self.ui.note(f + "\n")
1921 1922 try:
1922 1923 fctx = ctx[f]
1923 1924 if fctx is None:
1924 1925 removed.append(f)
1925 1926 else:
1926 1927 added.append(f)
1927 1928 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1928 1929 trp, changed)
1929 1930 m.setflag(f, fctx.flags())
1930 1931 except OSError as inst:
1931 1932 self.ui.warn(_("trouble committing %s!\n") % f)
1932 1933 raise
1933 1934 except IOError as inst:
1934 1935 errcode = getattr(inst, 'errno', errno.ENOENT)
1935 1936 if error or errcode and errcode != errno.ENOENT:
1936 1937 self.ui.warn(_("trouble committing %s!\n") % f)
1937 1938 raise
1938 1939
1939 1940 # update manifest
1940 1941 self.ui.note(_("committing manifest\n"))
1941 1942 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1942 1943 drop = [f for f in removed if f in m]
1943 1944 for f in drop:
1944 1945 del m[f]
1945 1946 mn = mctx.write(trp, linkrev,
1946 1947 p1.manifestnode(), p2.manifestnode(),
1947 1948 added, drop)
1948 1949 files = changed + removed
1949 1950 else:
1950 1951 mn = p1.manifestnode()
1951 1952 files = []
1952 1953
1953 1954 # update changelog
1954 1955 self.ui.note(_("committing changelog\n"))
1955 1956 self.changelog.delayupdate(tr)
1956 1957 n = self.changelog.add(mn, files, ctx.description(),
1957 1958 trp, p1.node(), p2.node(),
1958 1959 user, ctx.date(), ctx.extra().copy())
1959 1960 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1960 1961 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1961 1962 parent2=xp2)
1962 1963 # set the new commit in its proper phase
1963 1964 targetphase = subrepo.newcommitphase(self.ui, ctx)
1964 1965 if targetphase:
1965 1966 # retract boundary does not alter parent changesets.
1966 1967 # if a parent has a higher phase, the resulting phase will
1967 1968 # be compliant anyway
1968 1969 #
1969 1970 # if minimal phase was 0 we don't need to retract anything
1970 1971 phases.registernew(self, tr, targetphase, [n])
1971 1972 tr.close()
1972 1973 return n
1973 1974 finally:
1974 1975 if tr:
1975 1976 tr.release()
1976 1977 lock.release()
1977 1978
1978 1979 @unfilteredmethod
1979 1980 def destroying(self):
1980 1981 '''Inform the repository that nodes are about to be destroyed.
1981 1982 Intended for use by strip and rollback, so there's a common
1982 1983 place for anything that has to be done before destroying history.
1983 1984
1984 1985 This is mostly useful for saving state that is in memory and waiting
1985 1986 to be flushed when the current lock is released. Because a call to
1986 1987 destroyed is imminent, the repo will be invalidated, causing those
1987 1988 changes either to stay in memory (waiting for the next unlock) or to
1988 1989 vanish completely.
1989 1990 '''
1990 1991 # When using the same lock to commit and strip, the phasecache is left
1991 1992 # dirty after committing. Then when we strip, the repo is invalidated,
1992 1993 # causing those changes to disappear.
1993 1994 if '_phasecache' in vars(self):
1994 1995 self._phasecache.write()
1995 1996
1996 1997 @unfilteredmethod
1997 1998 def destroyed(self):
1998 1999 '''Inform the repository that nodes have been destroyed.
1999 2000 Intended for use by strip and rollback, so there's a common
2000 2001 place for anything that has to be done after destroying history.
2001 2002 '''
2002 2003 # When one tries to:
2003 2004 # 1) destroy nodes thus calling this method (e.g. strip)
2004 2005 # 2) use phasecache somewhere (e.g. commit)
2005 2006 #
2006 2007 # then 2) will fail because the phasecache contains nodes that were
2007 2008 # removed. We can either remove phasecache from the filecache,
2008 2009 # causing it to reload next time it is accessed, or simply filter
2009 2010 # the removed nodes now and write the updated cache.
2010 2011 self._phasecache.filterunknown(self)
2011 2012 self._phasecache.write()
2012 2013
2013 2014 # refresh all repository caches
2014 2015 self.updatecaches()
2015 2016
2016 2017 # Ensure the persistent tag cache is updated. Doing it now
2017 2018 # means that the tag cache only has to worry about destroyed
2018 2019 # heads immediately after a strip/rollback. That in turn
2019 2020 # guarantees that "cachetip == currenttip" (comparing both rev
2020 2021 # and node) always means no nodes have been added or destroyed.
2021 2022
2022 2023 # XXX this is suboptimal when qrefresh'ing: we strip the current
2023 2024 # head, refresh the tag cache, then immediately add a new head.
2024 2025 # But I think doing it this way is necessary for the "instant
2025 2026 # tag cache retrieval" case to work.
2026 2027 self.invalidate()
2027 2028
2028 2029 def walk(self, match, node=None):
2029 2030 '''
2030 2031 walk recursively through the directory tree or a given
2031 2032 changeset, finding all files matched by the match
2032 2033 function
2033 2034 '''
2034 2035 self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
2035 2036 return self[node].walk(match)
2036 2037
2037 2038 def status(self, node1='.', node2=None, match=None,
2038 2039 ignored=False, clean=False, unknown=False,
2039 2040 listsubrepos=False):
2040 2041 '''a convenience method that calls node1.status(node2)'''
2041 2042 return self[node1].status(node2, match, ignored, clean, unknown,
2042 2043 listsubrepos)
2043 2044
2044 2045 def addpostdsstatus(self, ps):
2045 2046 """Add a callback to run within the wlock, at the point at which status
2046 2047 fixups happen.
2047 2048
2048 2049 On status completion, callback(wctx, status) will be called with the
2049 2050 wlock held, unless the dirstate has changed from underneath or the wlock
2050 2051 couldn't be grabbed.
2051 2052
2052 2053 Callbacks should not capture and use a cached copy of the dirstate --
2053 2054 it might change in the meanwhile. Instead, they should access the
2054 2055 dirstate via wctx.repo().dirstate.
2055 2056
2056 2057 This list is emptied out after each status run -- extensions should
2057 2058 make sure they add to this list each time dirstate.status is called.
2058 2059 Extensions should also make sure they don't call this for statuses
2059 2060 that don't involve the dirstate.
2060 2061 """
2061 2062
2062 2063 # The list is located here for uniqueness reasons -- it is actually
2063 2064 # managed by the workingctx, but that isn't unique per-repo.
2064 2065 self._postdsstatus.append(ps)
2065 2066
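# A minimal sketch of a post-dirstate-status callback under the contract
# documented above; the function name and the debug message are illustrative,
# and 'repo' is assumed to be an open localrepository.
def poststatusfixup(wctx, status):
    # per the docstring, reach the dirstate through wctx.repo().dirstate
    # rather than through a cached copy
    wctx.repo().ui.debug('poststatus: %d modified files\n'
                         % len(status.modified))
# registered with: repo.addpostdsstatus(poststatusfixup)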
2066 2067 def postdsstatus(self):
2067 2068 """Used by workingctx to get the list of post-dirstate-status hooks."""
2068 2069 return self._postdsstatus
2069 2070
2070 2071 def clearpostdsstatus(self):
2071 2072 """Used by workingctx to clear post-dirstate-status hooks."""
2072 2073 del self._postdsstatus[:]
2073 2074
2074 2075 def heads(self, start=None):
2075 2076 if start is None:
2076 2077 cl = self.changelog
2077 2078 headrevs = reversed(cl.headrevs())
2078 2079 return [cl.node(rev) for rev in headrevs]
2079 2080
2080 2081 heads = self.changelog.heads(start)
2081 2082 # sort the output in rev descending order
2082 2083 return sorted(heads, key=self.changelog.rev, reverse=True)
2083 2084
2084 2085 def branchheads(self, branch=None, start=None, closed=False):
2085 2086 '''return a (possibly filtered) list of heads for the given branch
2086 2087
2087 2088 Heads are returned in topological order, from newest to oldest.
2088 2089 If branch is None, use the dirstate branch.
2089 2090 If start is not None, return only heads reachable from start.
2090 2091 If closed is True, return heads that are marked as closed as well.
2091 2092 '''
2092 2093 if branch is None:
2093 2094 branch = self[None].branch()
2094 2095 branches = self.branchmap()
2095 2096 if branch not in branches:
2096 2097 return []
2097 2098 # the cache returns heads ordered lowest to highest
2098 2099 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2099 2100 if start is not None:
2100 2101 # filter out the heads that cannot be reached from startrev
2101 2102 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2102 2103 bheads = [h for h in bheads if h in fbheads]
2103 2104 return bheads
2104 2105
2105 2106 def branches(self, nodes):
2106 2107 if not nodes:
2107 2108 nodes = [self.changelog.tip()]
2108 2109 b = []
2109 2110 for n in nodes:
2110 2111 t = n
2111 2112 while True:
2112 2113 p = self.changelog.parents(n)
2113 2114 if p[1] != nullid or p[0] == nullid:
2114 2115 b.append((t, n, p[0], p[1]))
2115 2116 break
2116 2117 n = p[0]
2117 2118 return b
2118 2119
2119 2120 def between(self, pairs):
2120 2121 r = []
2121 2122
2122 2123 for top, bottom in pairs:
2123 2124 n, l, i = top, [], 0
2124 2125 f = 1
2125 2126
2126 2127 while n != bottom and n != nullid:
2127 2128 p = self.changelog.parents(n)[0]
2128 2129 if i == f:
2129 2130 l.append(n)
2130 2131 f = f * 2
2131 2132 n = p
2132 2133 i += 1
2133 2134
2134 2135 r.append(l)
2135 2136
2136 2137 return r
2137 2138
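# The loop above samples the first-parent chain at exponentially growing
# distances: for each (top, bottom) pair it records the nodes 1, 2, 4, ...
# first-parent steps below top, stopping once it reaches bottom or nullid.
# A sketch on a hypothetical linear history n0 <- n1 <- ... <- n8:
#   repo.between([(n8, n0)])  ->  [[n7, n6, n4]]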
2138 2139 def checkpush(self, pushop):
2139 2140 """Extensions can override this function if additional checks have
2140 2141 to be performed before pushing, or call it if they override push
2141 2142 command.
2142 2143 """
2143 2144 pass
2144 2145
2145 2146 @unfilteredpropertycache
2146 2147 def prepushoutgoinghooks(self):
2147 2148 """Return util.hooks consists of a pushop with repo, remote, outgoing
2148 2149 methods, which are called before pushing changesets.
2149 2150 """
2150 2151 return util.hooks()
2151 2152
2152 2153 def pushkey(self, namespace, key, old, new):
2153 2154 try:
2154 2155 tr = self.currenttransaction()
2155 2156 hookargs = {}
2156 2157 if tr is not None:
2157 2158 hookargs.update(tr.hookargs)
2158 2159 hookargs['namespace'] = namespace
2159 2160 hookargs['key'] = key
2160 2161 hookargs['old'] = old
2161 2162 hookargs['new'] = new
2162 2163 self.hook('prepushkey', throw=True, **hookargs)
2163 2164 except error.HookAbort as exc:
2164 2165 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2165 2166 if exc.hint:
2166 2167 self.ui.write_err(_("(%s)\n") % exc.hint)
2167 2168 return False
2168 2169 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2169 2170 ret = pushkey.push(self, namespace, key, old, new)
2170 2171 def runhook():
2171 2172 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2172 2173 ret=ret)
2173 2174 self._afterlock(runhook)
2174 2175 return ret
2175 2176
2176 2177 def listkeys(self, namespace):
2177 2178 self.hook('prelistkeys', throw=True, namespace=namespace)
2178 2179 self.ui.debug('listing keys for "%s"\n' % namespace)
2179 2180 values = pushkey.list(self, namespace)
2180 2181 self.hook('listkeys', namespace=namespace, values=values)
2181 2182 return values
2182 2183
2183 2184 def debugwireargs(self, one, two, three=None, four=None, five=None):
2184 2185 '''used to test argument passing over the wire'''
2185 2186 return "%s %s %s %s %s" % (one, two, three, four, five)
2186 2187
2187 2188 def savecommitmessage(self, text):
2188 2189 fp = self.vfs('last-message.txt', 'wb')
2189 2190 try:
2190 2191 fp.write(text)
2191 2192 finally:
2192 2193 fp.close()
2193 2194 return self.pathto(fp.name[len(self.root) + 1:])
2194 2195
2195 2196 # used to avoid circular references so destructors work
2196 2197 def aftertrans(files):
2197 2198 renamefiles = [tuple(t) for t in files]
2198 2199 def a():
2199 2200 for vfs, src, dest in renamefiles:
2200 2201 # if src and dest refer to a same file, vfs.rename is a no-op,
2201 2202 # leaving both src and dest on disk. delete dest to make sure
2202 2203 # the rename couldn't be such a no-op.
2203 2204 vfs.tryunlink(dest)
2204 2205 try:
2205 2206 vfs.rename(src, dest)
2206 2207 except OSError: # journal file does not yet exist
2207 2208 pass
2208 2209 return a
2209 2210
2210 2211 def undoname(fn):
2211 2212 base, name = os.path.split(fn)
2212 2213 assert name.startswith('journal')
2213 2214 return os.path.join(base, name.replace('journal', 'undo', 1))
2214 2215
2215 2216 def instance(ui, path, create):
2216 2217 return localrepository(ui, util.urllocalpath(path), create)
2217 2218
2218 2219 def islocal(path):
2219 2220 return True
2220 2221
2221 2222 def newreporequirements(repo):
2222 2223 """Determine the set of requirements for a new local repository.
2223 2224
2224 2225 Extensions can wrap this function to specify custom requirements for
2225 2226 new repositories.
2226 2227 """
2227 2228 ui = repo.ui
2228 2229 requirements = {'revlogv1'}
2229 2230 if ui.configbool('format', 'usestore'):
2230 2231 requirements.add('store')
2231 2232 if ui.configbool('format', 'usefncache'):
2232 2233 requirements.add('fncache')
2233 2234 if ui.configbool('format', 'dotencode'):
2234 2235 requirements.add('dotencode')
2235 2236
2236 2237 compengine = ui.config('experimental', 'format.compression')
2237 2238 if compengine not in util.compengines:
2238 2239 raise error.Abort(_('compression engine %s defined by '
2239 2240 'experimental.format.compression not available') %
2240 2241 compengine,
2241 2242 hint=_('run "hg debuginstall" to list available '
2242 2243 'compression engines'))
2243 2244
2244 2245 # zlib is the historical default and doesn't need an explicit requirement.
2245 2246 if compengine != 'zlib':
2246 2247 requirements.add('exp-compression-%s' % compengine)
2247 2248
2248 2249 if scmutil.gdinitconfig(ui):
2249 2250 requirements.add('generaldelta')
2250 2251 if ui.configbool('experimental', 'treemanifest'):
2251 2252 requirements.add('treemanifest')
2252 2253 if ui.configbool('experimental', 'manifestv2'):
2253 2254 requirements.add('manifestv2')
2254 2255
2255 2256 revlogv2 = ui.config('experimental', 'revlogv2')
2256 2257 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2257 2258 requirements.remove('revlogv1')
2258 2259 # generaldelta is implied by revlogv2.
2259 2260 requirements.discard('generaldelta')
2260 2261 requirements.add(REVLOGV2_REQUIREMENT)
2261 2262
2262 2263 return requirements
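# A sketch of how the config read above maps to requirements for a new
# repository; the engine name 'zstd' is only an example and must actually
# be present in util.compengines:
#
#   [format]
#   usestore = True               -> 'store'
#   usefncache = True             -> 'fncache'
#   dotencode = True              -> 'dotencode'
#   [experimental]
#   treemanifest = True           -> 'treemanifest'
#   format.compression = zstd     -> 'exp-compression-zstd'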
@@ -1,215 +1,221
1 1 from __future__ import absolute_import
2 2
3 3 import errno
4 4 import os
5 5 import posixpath
6 6 import stat
7 7
8 8 from .i18n import _
9 9 from . import (
10 10 encoding,
11 11 error,
12 12 pycompat,
13 13 util,
14 14 )
15 15
16 16 def _lowerclean(s):
17 17 return encoding.hfsignoreclean(s.lower())
18 18
19 19 class pathauditor(object):
20 20 '''ensure that a filesystem path contains no banned components.
21 21 the following properties of a path are checked:
22 22
23 23 - ends with a directory separator
24 24 - under top-level .hg
25 25 - starts at the root of a windows drive
26 26 - contains ".."
27 27
28 28 More checks are also done on the file system state:
29 29 - traverses a symlink (e.g. a/symlink_here/b)
30 30 - inside a nested repository (a callback can be used to approve
31 31 some nested repositories, e.g., subrepositories)
32 32
33 33 The file system checks are only done when 'realfs' is set to True (the
34 34 default). They should be disabled when we are auditing paths for operations
35 35 on stored history.
36
37 If 'cached' is set to True, audited paths and sub-directories are cached.
38 Be careful not to keep the cache of unmanaged directories for long, because
39 audited paths may be replaced with symlinks.
36 40 '''
37 41
38 def __init__(self, root, callback=None, realfs=True):
42 def __init__(self, root, callback=None, realfs=True, cached=False):
39 43 self.audited = set()
40 44 self.auditeddir = set()
41 45 self.root = root
42 46 self._realfs = realfs
47 self._cached = cached
43 48 self.callback = callback
44 49 if os.path.lexists(root) and not util.fscasesensitive(root):
45 50 self.normcase = util.normcase
46 51 else:
47 52 self.normcase = lambda x: x
48 53
49 54 def __call__(self, path, mode=None):
50 55 '''Check the relative path.
51 56 path may contain a pattern (e.g. foodir/**.txt)'''
52 57
53 58 path = util.localpath(path)
54 59 normpath = self.normcase(path)
55 60 if normpath in self.audited:
56 61 return
57 62 # AIX ignores "/" at end of path, others raise EISDIR.
58 63 if util.endswithsep(path):
59 64 raise error.Abort(_("path ends in directory separator: %s") % path)
60 65 parts = util.splitpath(path)
61 66 if (os.path.splitdrive(path)[0]
62 67 or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
63 68 or os.pardir in parts):
64 69 raise error.Abort(_("path contains illegal component: %s") % path)
65 70 # Windows shortname aliases
66 71 for p in parts:
67 72 if "~" in p:
68 73 first, last = p.split("~", 1)
69 74 if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
70 75 raise error.Abort(_("path contains illegal component: %s")
71 76 % path)
72 77 if '.hg' in _lowerclean(path):
73 78 lparts = [_lowerclean(p.lower()) for p in parts]
74 79 for p in '.hg', '.hg.':
75 80 if p in lparts[1:]:
76 81 pos = lparts.index(p)
77 82 base = os.path.join(*parts[:pos])
78 83 raise error.Abort(_("path '%s' is inside nested repo %r")
79 84 % (path, base))
80 85
81 86 normparts = util.splitpath(normpath)
82 87 assert len(parts) == len(normparts)
83 88
84 89 parts.pop()
85 90 normparts.pop()
86 91 prefixes = []
87 92 # It's important that we check the path parts starting from the root.
88 93 # This means we won't accidentally traverse a symlink into some other
89 94 # filesystem (which is potentially expensive to access).
90 95 for i in range(len(parts)):
91 96 prefix = pycompat.ossep.join(parts[:i + 1])
92 97 normprefix = pycompat.ossep.join(normparts[:i + 1])
93 98 if normprefix in self.auditeddir:
94 99 continue
95 100 if self._realfs:
96 101 self._checkfs(prefix, path)
97 102 prefixes.append(normprefix)
98 103
99 self.audited.add(normpath)
100 # only add prefixes to the cache after checking everything: we don't
101 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
102 self.auditeddir.update(prefixes)
104 if self._cached:
105 self.audited.add(normpath)
106 # only add prefixes to the cache after checking everything: we don't
107 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
108 self.auditeddir.update(prefixes)
103 109
104 110 def _checkfs(self, prefix, path):
105 111 """raise exception if a file system backed check fails"""
106 112 curpath = os.path.join(self.root, prefix)
107 113 try:
108 114 st = os.lstat(curpath)
109 115 except OSError as err:
110 116 # EINVAL can be raised for invalid path syntax under win32.
111 117 # These errors must be ignored so that patterns can still be checked.
112 118 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
113 119 raise
114 120 else:
115 121 if stat.S_ISLNK(st.st_mode):
116 122 msg = _('path %r traverses symbolic link %r') % (path, prefix)
117 123 raise error.Abort(msg)
118 124 elif (stat.S_ISDIR(st.st_mode) and
119 125 os.path.isdir(os.path.join(curpath, '.hg'))):
120 126 if not self.callback or not self.callback(curpath):
121 127 msg = _("path '%s' is inside nested repo %r")
122 128 raise error.Abort(msg % (path, prefix))
123 129
124 130 def check(self, path):
125 131 try:
126 132 self(path)
127 133 return True
128 134 except (OSError, error.Abort):
129 135 return False
130 136
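# A minimal usage sketch for the auditor above; '/repo' and the file paths
# are placeholders. With the default cached=False nothing is memoized
# (the issue5628 fix); pass cached=True only for short-lived bulk audits
# where no symlink can appear under an already-audited directory.
audit = pathauditor('/repo')                 # uncached, the new default
audit('src/module.py')                       # raises error.Abort if banned
if pathauditor('/repo', cached=True).check('src/module.py'):
    pass                                     # check() returns a bool instead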
131 137 def canonpath(root, cwd, myname, auditor=None):
132 138 '''return the canonical path of myname, given cwd and root'''
133 139 if util.endswithsep(root):
134 140 rootsep = root
135 141 else:
136 142 rootsep = root + pycompat.ossep
137 143 name = myname
138 144 if not os.path.isabs(name):
139 145 name = os.path.join(root, cwd, name)
140 146 name = os.path.normpath(name)
141 147 if auditor is None:
142 148 auditor = pathauditor(root)
143 149 if name != rootsep and name.startswith(rootsep):
144 150 name = name[len(rootsep):]
145 151 auditor(name)
146 152 return util.pconvert(name)
147 153 elif name == root:
148 154 return ''
149 155 else:
150 156 # Determine whether `name' is in the hierarchy at or beneath `root',
151 157 # by iterating name=dirname(name) until that causes no change (can't
152 158 # check name == '/', because that doesn't work on windows). The list
153 159 # `rel' holds the reversed list of components making up the relative
154 160 # file name we want.
155 161 rel = []
156 162 while True:
157 163 try:
158 164 s = util.samefile(name, root)
159 165 except OSError:
160 166 s = False
161 167 if s:
162 168 if not rel:
163 169 # name was actually the same as root (maybe a symlink)
164 170 return ''
165 171 rel.reverse()
166 172 name = os.path.join(*rel)
167 173 auditor(name)
168 174 return util.pconvert(name)
169 175 dirname, basename = util.split(name)
170 176 rel.append(basename)
171 177 if dirname == name:
172 178 break
173 179 name = dirname
174 180
175 181 # A common mistake is to use -R, but specify a file relative to the repo
176 182 # instead of cwd. Detect that case, and provide a hint to the user.
177 183 hint = None
178 184 try:
179 185 if cwd != root:
180 186 canonpath(root, root, myname, auditor)
181 187 hint = (_("consider using '--cwd %s'")
182 188 % os.path.relpath(root, cwd))
183 189 except error.Abort:
184 190 pass
185 191
186 192 raise error.Abort(_("%s not under root '%s'") % (myname, root),
187 193 hint=hint)
188 194
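# A sketch of canonpath() on a hypothetical '/repo' layout, following the
# branches above (relative names are joined to root+cwd first):
#   canonpath('/repo', 'subdir', 'file.txt')  -> 'subdir/file.txt'
#   canonpath('/repo', '', '/repo')           -> ''
#   canonpath('/repo', '', '/elsewhere/x')    -> error.Abort: not under root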
189 195 def normasprefix(path):
190 196 '''normalize the specified path as path prefix
191 197
192 198 Returned value can be used safely for "p.startswith(prefix)",
193 199 "p[len(prefix):]", and so on.
194 200
195 201 For efficiency, this expects the "path" argument to be already
196 202 normalized by "os.path.normpath", "os.path.realpath", and so on.
197 203
198 204 See also issue3033 for details about the need for this function.
199 205
200 206 >>> normasprefix('/foo/bar').replace(os.sep, '/')
201 207 '/foo/bar/'
202 208 >>> normasprefix('/').replace(os.sep, '/')
203 209 '/'
204 210 '''
205 211 d, p = os.path.splitdrive(path)
206 212 if len(p) != len(pycompat.ossep):
207 213 return path + pycompat.ossep
208 214 else:
209 215 return path
210 216
211 217 # forward two methods from posixpath that do what we need, but we'd
212 218 # rather not let our internals know that we're thinking in posix terms
213 219 # - instead we'll let them be oblivious.
214 220 join = posixpath.join
215 221 dirname = posixpath.dirname
@@ -1,1105 +1,1105
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 wdirid,
23 23 wdirrev,
24 24 )
25 25
26 26 from . import (
27 27 encoding,
28 28 error,
29 29 match as matchmod,
30 30 obsolete,
31 31 obsutil,
32 32 pathutil,
33 33 phases,
34 34 pycompat,
35 35 revsetlang,
36 36 similar,
37 37 util,
38 38 )
39 39
40 40 if pycompat.osname == 'nt':
41 41 from . import scmwindows as scmplatform
42 42 else:
43 43 from . import scmposix as scmplatform
44 44
45 45 termsize = scmplatform.termsize
46 46
47 47 class status(tuple):
48 48 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
49 49 and 'ignored' properties are only relevant to the working copy.
50 50 '''
51 51
52 52 __slots__ = ()
53 53
54 54 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
55 55 clean):
56 56 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
57 57 ignored, clean))
58 58
59 59 @property
60 60 def modified(self):
61 61 '''files that have been modified'''
62 62 return self[0]
63 63
64 64 @property
65 65 def added(self):
66 66 '''files that have been added'''
67 67 return self[1]
68 68
69 69 @property
70 70 def removed(self):
71 71 '''files that have been removed'''
72 72 return self[2]
73 73
74 74 @property
75 75 def deleted(self):
76 76 '''files that are in the dirstate, but have been deleted from the
77 77 working copy (aka "missing")
78 78 '''
79 79 return self[3]
80 80
81 81 @property
82 82 def unknown(self):
83 83 '''files not in the dirstate that are not ignored'''
84 84 return self[4]
85 85
86 86 @property
87 87 def ignored(self):
88 88 '''files not in the dirstate that are ignored (by _dirignore())'''
89 89 return self[5]
90 90
91 91 @property
92 92 def clean(self):
93 93 '''files that have not been modified'''
94 94 return self[6]
95 95
96 96 def __repr__(self, *args, **kwargs):
97 97 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
98 98 'unknown=%r, ignored=%r, clean=%r>') % self)
99 99
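# The class above is a plain 7-tuple with named accessors, so both access
# styles below work; empty lists stand in for real file lists:
st = status(['m.txt'], ['a.txt'], [], [], [], [], [])
assert st.modified == ['m.txt']     # property access
modified, added = st[0], st[1]      # positional access still works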
100 100 def itersubrepos(ctx1, ctx2):
101 101 """find subrepos in ctx1 or ctx2"""
102 102 # Create a (subpath, ctx) mapping where we prefer subpaths from
103 103 # ctx1. The subpaths from ctx2 are important when the .hgsub file
104 104 # has been modified (in ctx2) but not yet committed (in ctx1).
105 105 subpaths = dict.fromkeys(ctx2.substate, ctx2)
106 106 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
107 107
108 108 missing = set()
109 109
110 110 for subpath in ctx2.substate:
111 111 if subpath not in ctx1.substate:
112 112 del subpaths[subpath]
113 113 missing.add(subpath)
114 114
115 115 for subpath, ctx in sorted(subpaths.iteritems()):
116 116 yield subpath, ctx.sub(subpath)
117 117
118 118 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
119 119 # status and diff will have an accurate result when it does
120 120 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
121 121 # against itself.
122 122 for subpath in missing:
123 123 yield subpath, ctx2.nullsub(subpath, ctx1)
124 124
125 125 def nochangesfound(ui, repo, excluded=None):
126 126 '''Report no changes for push/pull, excluded is None or a list of
127 127 nodes excluded from the push/pull.
128 128 '''
129 129 secretlist = []
130 130 if excluded:
131 131 for n in excluded:
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def callcatch(ui, func):
143 143 """call func() with global exception handling
144 144
145 145 return func() if no exception happens. otherwise do some error handling
146 146 and return an exit code accordingly. does not handle all exceptions.
147 147 """
148 148 try:
149 149 try:
150 150 return func()
151 151 except: # re-raises
152 152 ui.traceback()
153 153 raise
154 154 # Global exception handling, alphabetically
155 155 # Mercurial-specific first, followed by built-in and library exceptions
156 156 except error.LockHeld as inst:
157 157 if inst.errno == errno.ETIMEDOUT:
158 158 reason = _('timed out waiting for lock held by %r') % inst.locker
159 159 else:
160 160 reason = _('lock held by %r') % inst.locker
161 161 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
162 162 if not inst.locker:
163 163 ui.warn(_("(lock might be very busy)\n"))
164 164 except error.LockUnavailable as inst:
165 165 ui.warn(_("abort: could not lock %s: %s\n") %
166 166 (inst.desc or inst.filename, inst.strerror))
167 167 except error.OutOfBandError as inst:
168 168 if inst.args:
169 169 msg = _("abort: remote error:\n")
170 170 else:
171 171 msg = _("abort: remote error\n")
172 172 ui.warn(msg)
173 173 if inst.args:
174 174 ui.warn(''.join(inst.args))
175 175 if inst.hint:
176 176 ui.warn('(%s)\n' % inst.hint)
177 177 except error.RepoError as inst:
178 178 ui.warn(_("abort: %s!\n") % inst)
179 179 if inst.hint:
180 180 ui.warn(_("(%s)\n") % inst.hint)
181 181 except error.ResponseError as inst:
182 182 ui.warn(_("abort: %s") % inst.args[0])
183 183 if not isinstance(inst.args[1], basestring):
184 184 ui.warn(" %r\n" % (inst.args[1],))
185 185 elif not inst.args[1]:
186 186 ui.warn(_(" empty string\n"))
187 187 else:
188 188 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
189 189 except error.CensoredNodeError as inst:
190 190 ui.warn(_("abort: file censored %s!\n") % inst)
191 191 except error.RevlogError as inst:
192 192 ui.warn(_("abort: %s!\n") % inst)
193 193 except error.InterventionRequired as inst:
194 194 ui.warn("%s\n" % inst)
195 195 if inst.hint:
196 196 ui.warn(_("(%s)\n") % inst.hint)
197 197 return 1
198 198 except error.WdirUnsupported:
199 199 ui.warn(_("abort: working directory revision cannot be specified\n"))
200 200 except error.Abort as inst:
201 201 ui.warn(_("abort: %s\n") % inst)
202 202 if inst.hint:
203 203 ui.warn(_("(%s)\n") % inst.hint)
204 204 except ImportError as inst:
205 205 ui.warn(_("abort: %s!\n") % inst)
206 206 m = str(inst).split()[-1]
207 207 if m in "mpatch bdiff".split():
208 208 ui.warn(_("(did you forget to compile extensions?)\n"))
209 209 elif m in "zlib".split():
210 210 ui.warn(_("(is your Python install correct?)\n"))
211 211 except IOError as inst:
212 212 if util.safehasattr(inst, "code"):
213 213 ui.warn(_("abort: %s\n") % inst)
214 214 elif util.safehasattr(inst, "reason"):
215 215 try: # usually it is in the form (errno, strerror)
216 216 reason = inst.reason.args[1]
217 217 except (AttributeError, IndexError):
218 218 # it might be anything, for example a string
219 219 reason = inst.reason
220 220 if isinstance(reason, unicode):
221 221 # SSLError of Python 2.7.9 contains a unicode
222 222 reason = encoding.unitolocal(reason)
223 223 ui.warn(_("abort: error: %s\n") % reason)
224 224 elif (util.safehasattr(inst, "args")
225 225 and inst.args and inst.args[0] == errno.EPIPE):
226 226 pass
227 227 elif getattr(inst, "strerror", None):
228 228 if getattr(inst, "filename", None):
229 229 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
230 230 else:
231 231 ui.warn(_("abort: %s\n") % inst.strerror)
232 232 else:
233 233 raise
234 234 except OSError as inst:
235 235 if getattr(inst, "filename", None) is not None:
236 236 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
237 237 else:
238 238 ui.warn(_("abort: %s\n") % inst.strerror)
239 239 except MemoryError:
240 240 ui.warn(_("abort: out of memory\n"))
241 241 except SystemExit as inst:
242 242 # Commands shouldn't sys.exit directly, but give a return code.
243 243 # Just in case, catch this and pass the exit code to the caller.
244 244 return inst.code
245 245 except socket.error as inst:
246 246 ui.warn(_("abort: %s\n") % inst.args[-1])
247 247
248 248 return -1
249 249
250 250 def checknewlabel(repo, lbl, kind):
251 251 # Do not use the "kind" parameter in ui output.
252 252 # It makes strings difficult to translate.
253 253 if lbl in ['tip', '.', 'null']:
254 254 raise error.Abort(_("the name '%s' is reserved") % lbl)
255 255 for c in (':', '\0', '\n', '\r'):
256 256 if c in lbl:
257 257 raise error.Abort(_("%r cannot be used in a name") % c)
258 258 try:
259 259 int(lbl)
260 260 raise error.Abort(_("cannot use an integer as a name"))
261 261 except ValueError:
262 262 pass
263 263
264 264 def checkfilename(f):
265 265 '''Check that the filename f is an acceptable filename for a tracked file'''
266 266 if '\r' in f or '\n' in f:
267 267 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
268 268
269 269 def checkportable(ui, f):
270 270 '''Check if filename f is portable and warn or abort depending on config'''
271 271 checkfilename(f)
272 272 abort, warn = checkportabilityalert(ui)
273 273 if abort or warn:
274 274 msg = util.checkwinfilename(f)
275 275 if msg:
276 276 msg = "%s: %r" % (msg, f)
277 277 if abort:
278 278 raise error.Abort(msg)
279 279 ui.warn(_("warning: %s\n") % msg)
280 280
281 281 def checkportabilityalert(ui):
282 282 '''check if the user's config requests nothing, a warning, or abort for
283 283 non-portable filenames'''
284 284 val = ui.config('ui', 'portablefilenames')
285 285 lval = val.lower()
286 286 bval = util.parsebool(val)
287 287 abort = pycompat.osname == 'nt' or lval == 'abort'
288 288 warn = bval or lval == 'warn'
289 289 if bval is None and not (warn or abort or lval == 'ignore'):
290 290 raise error.ConfigError(
291 291 _("ui.portablefilenames value is invalid ('%s')") % val)
292 292 return abort, warn
293 293
294 294 class casecollisionauditor(object):
295 295 def __init__(self, ui, abort, dirstate):
296 296 self._ui = ui
297 297 self._abort = abort
298 298 allfiles = '\0'.join(dirstate._map)
299 299 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
300 300 self._dirstate = dirstate
301 301 # The purpose of _newfiles is so that we don't complain about
302 302 # case collisions if someone were to call this object with the
303 303 # same filename twice.
304 304 self._newfiles = set()
305 305
306 306 def __call__(self, f):
307 307 if f in self._newfiles:
308 308 return
309 309 fl = encoding.lower(f)
310 310 if fl in self._loweredfiles and f not in self._dirstate:
311 311 msg = _('possible case-folding collision for %s') % f
312 312 if self._abort:
313 313 raise error.Abort(msg)
314 314 self._ui.warn(_("warning: %s\n") % msg)
315 315 self._loweredfiles.add(fl)
316 316 self._newfiles.add(f)
317 317
318 318 def filteredhash(repo, maxrev):
319 319 """build hash of filtered revisions in the current repoview.
320 320
321 321 Multiple caches perform up-to-date validation by checking that the
322 322 tiprev and tipnode stored in the cache file match the current repository.
323 323 However, this is not sufficient for validating repoviews because the set
324 324 of revisions in the view may change without the repository tiprev and
325 325 tipnode changing.
326 326
327 327 This function hashes all the revs filtered from the view and returns
328 328 that SHA-1 digest.
329 329 """
330 330 cl = repo.changelog
331 331 if not cl.filteredrevs:
332 332 return None
333 333 key = None
334 334 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
335 335 if revs:
336 336 s = hashlib.sha1()
337 337 for rev in revs:
338 338 s.update('%d;' % rev)
339 339 key = s.digest()
340 340 return key
341 341
342 342 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
343 343 '''yield every hg repository under path, always recursively.
344 344 The recurse flag only controls recursion into repo working dirs'''
345 345 def errhandler(err):
346 346 if err.filename == path:
347 347 raise err
348 348 samestat = getattr(os.path, 'samestat', None)
349 349 if followsym and samestat is not None:
350 350 def adddir(dirlst, dirname):
351 351 match = False
352 352 dirstat = os.stat(dirname)
353 353 for lstdirstat in dirlst:
354 354 if samestat(dirstat, lstdirstat):
355 355 match = True
356 356 break
357 357 if not match:
358 358 dirlst.append(dirstat)
359 359 return not match
360 360 else:
361 361 followsym = False
362 362
363 363 if (seen_dirs is None) and followsym:
364 364 seen_dirs = []
365 365 adddir(seen_dirs, path)
366 366 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
367 367 dirs.sort()
368 368 if '.hg' in dirs:
369 369 yield root # found a repository
370 370 qroot = os.path.join(root, '.hg', 'patches')
371 371 if os.path.isdir(os.path.join(qroot, '.hg')):
372 372 yield qroot # we have a patch queue repo here
373 373 if recurse:
374 374 # avoid recursing inside the .hg directory
375 375 dirs.remove('.hg')
376 376 else:
377 377 dirs[:] = [] # don't descend further
378 378 elif followsym:
379 379 newdirs = []
380 380 for d in dirs:
381 381 fname = os.path.join(root, d)
382 382 if adddir(seen_dirs, fname):
383 383 if os.path.islink(fname):
384 384 for hgname in walkrepos(fname, True, seen_dirs):
385 385 yield hgname
386 386 else:
387 387 newdirs.append(d)
388 388 dirs[:] = newdirs
389 389
390 390 def binnode(ctx):
391 391 """Return binary node id for a given basectx"""
392 392 node = ctx.node()
393 393 if node is None:
394 394 return wdirid
395 395 return node
396 396
397 397 def intrev(ctx):
398 398 """Return integer for a given basectx that can be used in comparison or
399 399 arithmetic operation"""
400 400 rev = ctx.rev()
401 401 if rev is None:
402 402 return wdirrev
403 403 return rev
404 404
405 405 def revsingle(repo, revspec, default='.'):
406 406 if not revspec and revspec != 0:
407 407 return repo[default]
408 408
409 409 l = revrange(repo, [revspec])
410 410 if not l:
411 411 raise error.Abort(_('empty revision set'))
412 412 return repo[l.last()]
413 413
414 414 def _pairspec(revspec):
415 415 tree = revsetlang.parse(revspec)
416 416 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
417 417
418 418 def revpair(repo, revs):
419 419 if not revs:
420 420 return repo.dirstate.p1(), None
421 421
422 422 l = revrange(repo, revs)
423 423
424 424 if not l:
425 425 first = second = None
426 426 elif l.isascending():
427 427 first = l.min()
428 428 second = l.max()
429 429 elif l.isdescending():
430 430 first = l.max()
431 431 second = l.min()
432 432 else:
433 433 first = l.first()
434 434 second = l.last()
435 435
436 436 if first is None:
437 437 raise error.Abort(_('empty revision range'))
438 438 if (first == second and len(revs) >= 2
439 439 and not all(revrange(repo, [r]) for r in revs)):
440 440 raise error.Abort(_('empty revision on one side of range'))
441 441
442 442 # if top-level is range expression, the result must always be a pair
443 443 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
444 444 return repo.lookup(first), None
445 445
446 446 return repo.lookup(first), repo.lookup(second)
447 447
448 448 def revrange(repo, specs):
449 449 """Execute 1 to many revsets and return the union.
450 450
451 451 This is the preferred mechanism for executing revsets using user-specified
452 452 config options, such as revset aliases.
453 453
454 454 The revsets specified by ``specs`` will be executed via a chained ``OR``
455 455 expression. If ``specs`` is empty, an empty result is returned.
456 456
457 457 ``specs`` can contain integers, in which case they are assumed to be
458 458 revision numbers.
459 459
460 460 It is assumed the revsets are already formatted. If you have arguments
461 461 that need to be expanded in the revset, call ``revsetlang.formatspec()``
462 462 and pass the result as an element of ``specs``.
463 463
464 464 Specifying a single revset is allowed.
465 465
466 466 Returns a ``revset.abstractsmartset`` which is a list-like interface over
467 467 integer revisions.
468 468 """
469 469 allspecs = []
470 470 for spec in specs:
471 471 if isinstance(spec, int):
472 472 spec = revsetlang.formatspec('rev(%d)', spec)
473 473 allspecs.append(spec)
474 474 return repo.anyrevs(allspecs, user=True)
475 475
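# A sketch of the chained-OR behavior documented above, assuming 'repo' is
# an open repository:
#   revrange(repo, ['draft()', 'tip'])  # union of the two revsets
#   revrange(repo, [0, 5])              # ints are wrapped as rev(0), rev(5)
#   revrange(repo, [])                  # empty smartset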
476 476 def meaningfulparents(repo, ctx):
477 477 """Return list of meaningful (or all if debug) parentrevs for rev.
478 478
479 479 For merges (two non-nullrev revisions) both parents are meaningful.
480 480 Otherwise the first parent revision is considered meaningful if it
481 481 is not the preceding revision.
482 482 """
483 483 parents = ctx.parents()
484 484 if len(parents) > 1:
485 485 return parents
486 486 if repo.ui.debugflag:
487 487 return [parents[0], repo['null']]
488 488 if parents[0].rev() >= intrev(ctx) - 1:
489 489 return []
490 490 return parents
491 491
492 492 def expandpats(pats):
493 493 '''Expand bare globs when running on windows.
494 494 On posix we assume it has already been done by sh.'''
495 495 if not util.expandglobs:
496 496 return list(pats)
497 497 ret = []
498 498 for kindpat in pats:
499 499 kind, pat = matchmod._patsplit(kindpat, None)
500 500 if kind is None:
501 501 try:
502 502 globbed = glob.glob(pat)
503 503 except re.error:
504 504 globbed = [pat]
505 505 if globbed:
506 506 ret.extend(globbed)
507 507 continue
508 508 ret.append(kindpat)
509 509 return ret
510 510
511 511 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
512 512 badfn=None):
513 513 '''Return a matcher and the patterns that were used.
514 514 The matcher will warn about bad matches, unless an alternate badfn callback
515 515 is provided.'''
516 516 if pats == ("",):
517 517 pats = []
518 518 if opts is None:
519 519 opts = {}
520 520 if not globbed and default == 'relpath':
521 521 pats = expandpats(pats or [])
522 522
523 523 def bad(f, msg):
524 524 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
525 525
526 526 if badfn is None:
527 527 badfn = bad
528 528
529 529 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
530 530 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
531 531
532 532 if m.always():
533 533 pats = []
534 534 return m, pats
535 535
536 536 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
537 537 badfn=None):
538 538 '''Return a matcher that will warn about bad matches.'''
539 539 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
540 540
541 541 def matchall(repo):
542 542 '''Return a matcher that will efficiently match everything.'''
543 543 return matchmod.always(repo.root, repo.getcwd())
544 544
545 545 def matchfiles(repo, files, badfn=None):
546 546 '''Return a matcher that will efficiently match exactly these files.'''
547 547 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
548 548
549 549 def origpath(ui, repo, filepath):
550 550 '''customize where .orig files are created
551 551
552 552 Fetch the user-defined path from the config file: [ui] origbackuppath = <path>
553 553 Fall back to default (filepath) if not specified
554 554 '''
555 555 origbackuppath = ui.config('ui', 'origbackuppath')
556 556 if origbackuppath is None:
557 557 return filepath + ".orig"
558 558
559 559 filepathfromroot = os.path.relpath(filepath, start=repo.root)
560 560 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
561 561
562 562 origbackupdir = repo.vfs.dirname(fullorigpath)
563 563 if not repo.vfs.exists(origbackupdir):
564 564 ui.note(_('creating directory: %s\n') % origbackupdir)
565 565 util.makedirs(origbackupdir)
566 566
567 567 return fullorigpath + ".orig"
568 568
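# A sketch of the knob read above; with the hgrc below (the directory name
# is just an example), a backup of 'a/b.txt' is written to
# '.hg/origbackups/a/b.txt.orig' instead of 'a/b.txt.orig':
#
#   [ui]
#   origbackuppath = .hg/origbackups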
569 569 class _containsnode(object):
570 570 """proxy __contains__(node) to container.__contains__ which accepts revs"""
571 571
572 572 def __init__(self, repo, revcontainer):
573 573 self._torev = repo.changelog.rev
574 574 self._revcontains = revcontainer.__contains__
575 575
576 576 def __contains__(self, node):
577 577 return self._revcontains(self._torev(node))
578 578
579 579 def cleanupnodes(repo, mapping, operation):
580 580 """do common cleanups when old nodes are replaced by new nodes
581 581
582 582 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
583 583 (we might also want to move working directory parent in the future)
584 584
585 585 mapping is {oldnode: [newnode]} or an iterable of nodes if they do not have
586 586 replacements. operation is a string, like "rebase".
587 587 """
588 588 if not util.safehasattr(mapping, 'items'):
589 589 mapping = {n: () for n in mapping}
590 590
591 591 with repo.transaction('cleanup') as tr:
592 592 # Move bookmarks
593 593 bmarks = repo._bookmarks
594 594 bmarkchanges = []
595 595 allnewnodes = [n for ns in mapping.values() for n in ns]
596 596 for oldnode, newnodes in mapping.items():
597 597 oldbmarks = repo.nodebookmarks(oldnode)
598 598 if not oldbmarks:
599 599 continue
600 600 from . import bookmarks # avoid import cycle
601 601 if len(newnodes) > 1:
602 602 # usually a split, take the one with biggest rev number
603 603 newnode = next(repo.set('max(%ln)', newnodes)).node()
604 604 elif len(newnodes) == 0:
605 605 # move bookmark backwards
606 606 roots = list(repo.set('max((::%n) - %ln)', oldnode,
607 607 list(mapping)))
608 608 if roots:
609 609 newnode = roots[0].node()
610 610 else:
611 611 newnode = nullid
612 612 else:
613 613 newnode = newnodes[0]
614 614 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
615 615 (oldbmarks, hex(oldnode), hex(newnode)))
616 616 # Delete divergent bookmarks being parents of related newnodes
617 617 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
618 618 allnewnodes, newnode, oldnode)
619 619 deletenodes = _containsnode(repo, deleterevs)
620 620 for name in oldbmarks:
621 621 bmarkchanges.append((name, newnode))
622 622 for b in bookmarks.divergent2delete(repo, deletenodes, name):
623 623 bmarkchanges.append((b, None))
624 624
625 625 if bmarkchanges:
626 626 bmarks.applychanges(repo, tr, bmarkchanges)
627 627
628 628 # Obsolete or strip nodes
629 629 if obsolete.isenabled(repo, obsolete.createmarkersopt):
630 630 # If a node is already obsoleted, and we want to obsolete it
631 631 # without a successor, skip that obsolete request since it's
632 632 # unnecessary. That's the "if s or not isobs(n)" check below.
633 633 # Also sort the nodes in topological order; that might be useful for
634 634 # some obsstore logic.
635 635 # NOTE: the filtering and sorting might belong to createmarkers.
636 636 # Unfiltered repo is needed since nodes in mapping might be hidden.
637 637 unfi = repo.unfiltered()
638 638 isobs = unfi.obsstore.successors.__contains__
639 639 torev = unfi.changelog.rev
640 640 sortfunc = lambda ns: torev(ns[0])
641 641 rels = [(unfi[n], tuple(unfi[m] for m in s))
642 642 for n, s in sorted(mapping.items(), key=sortfunc)
643 643 if s or not isobs(n)]
644 644 obsolete.createmarkers(repo, rels, operation=operation)
645 645 else:
646 646 from . import repair # avoid import cycle
647 647 repair.delayedstrip(repo.ui, repo, list(mapping), operation)
648 648
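# A sketch of the mapping shapes accepted above ('old'/'new' stand for
# binary node ids):
#   cleanupnodes(repo, {old: [new]}, 'rebase')   # old superseded by new
#   cleanupnodes(repo, {old: []}, 'prune')       # obsoleted, no successor
#   cleanupnodes(repo, [old1, old2], 'strip')    # bare iterable, no mapping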
649 649 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
650 650 if opts is None:
651 651 opts = {}
652 652 m = matcher
653 653 if dry_run is None:
654 654 dry_run = opts.get('dry_run')
655 655 if similarity is None:
656 656 similarity = float(opts.get('similarity') or 0)
657 657
658 658 ret = 0
659 659 join = lambda f: os.path.join(prefix, f)
660 660
661 661 wctx = repo[None]
662 662 for subpath in sorted(wctx.substate):
663 663 submatch = matchmod.subdirmatcher(subpath, m)
664 664 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
665 665 sub = wctx.sub(subpath)
666 666 try:
667 667 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
668 668 ret = 1
669 669 except error.LookupError:
670 670 repo.ui.status(_("skipping missing subrepository: %s\n")
671 671 % join(subpath))
672 672
673 673 rejected = []
674 674 def badfn(f, msg):
675 675 if f in m.files():
676 676 m.bad(f, msg)
677 677 rejected.append(f)
678 678
679 679 badmatch = matchmod.badmatch(m, badfn)
680 680 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
681 681 badmatch)
682 682
683 683 unknownset = set(unknown + forgotten)
684 684 toprint = unknownset.copy()
685 685 toprint.update(deleted)
686 686 for abs in sorted(toprint):
687 687 if repo.ui.verbose or not m.exact(abs):
688 688 if abs in unknownset:
689 689 status = _('adding %s\n') % m.uipath(abs)
690 690 else:
691 691 status = _('removing %s\n') % m.uipath(abs)
692 692 repo.ui.status(status)
693 693
694 694 renames = _findrenames(repo, m, added + unknown, removed + deleted,
695 695 similarity)
696 696
697 697 if not dry_run:
698 698 _markchanges(repo, unknown + forgotten, deleted, renames)
699 699
700 700 for f in rejected:
701 701 if f in m.files():
702 702 return 1
703 703 return ret
704 704
705 705 def marktouched(repo, files, similarity=0.0):
706 706 '''Assert that files have somehow been operated upon. files are relative to
707 707 the repo root.'''
708 708 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
709 709 rejected = []
710 710
711 711 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
712 712
713 713 if repo.ui.verbose:
714 714 unknownset = set(unknown + forgotten)
715 715 toprint = unknownset.copy()
716 716 toprint.update(deleted)
717 717 for abs in sorted(toprint):
718 718 if abs in unknownset:
719 719 status = _('adding %s\n') % abs
720 720 else:
721 721 status = _('removing %s\n') % abs
722 722 repo.ui.status(status)
723 723
724 724 renames = _findrenames(repo, m, added + unknown, removed + deleted,
725 725 similarity)
726 726
727 727 _markchanges(repo, unknown + forgotten, deleted, renames)
728 728
729 729 for f in rejected:
730 730 if f in m.files():
731 731 return 1
732 732 return 0
733 733
734 734 def _interestingfiles(repo, matcher):
735 735 '''Walk dirstate with matcher, looking for files that addremove would care
736 736 about.
737 737
738 738 This is different from dirstate.status because it doesn't care about
739 739 whether files are modified or clean.'''
740 740 added, unknown, deleted, removed, forgotten = [], [], [], [], []
741 audit_path = pathutil.pathauditor(repo.root)
741 audit_path = pathutil.pathauditor(repo.root, cached=True)
742 742
743 743 ctx = repo[None]
744 744 dirstate = repo.dirstate
745 745 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
746 746 full=False)
747 747 for abs, st in walkresults.iteritems():
748 748 dstate = dirstate[abs]
749 749 if dstate == '?' and audit_path.check(abs):
750 750 unknown.append(abs)
751 751 elif dstate != 'r' and not st:
752 752 deleted.append(abs)
753 753 elif dstate == 'r' and st:
754 754 forgotten.append(abs)
755 755 # for finding renames
756 756 elif dstate == 'r' and not st:
757 757 removed.append(abs)
758 758 elif dstate == 'a':
759 759 added.append(abs)
760 760
761 761 return added, unknown, deleted, removed, forgotten
762 762
763 763 def _findrenames(repo, matcher, added, removed, similarity):
764 764 '''Find renames from removed files to added ones.'''
765 765 renames = {}
766 766 if similarity > 0:
767 767 for old, new, score in similar.findrenames(repo, added, removed,
768 768 similarity):
769 769 if (repo.ui.verbose or not matcher.exact(old)
770 770 or not matcher.exact(new)):
771 771 repo.ui.status(_('recording removal of %s as rename to %s '
772 772 '(%d%% similar)\n') %
773 773 (matcher.rel(old), matcher.rel(new),
774 774 score * 100))
775 775 renames[new] = old
776 776 return renames
777 777
778 778 def _markchanges(repo, unknown, deleted, renames):
779 779 '''Marks the files in unknown as added, the files in deleted as removed,
780 780 and the files in renames as copied.'''
781 781 wctx = repo[None]
782 782 with repo.wlock():
783 783 wctx.forget(deleted)
784 784 wctx.add(unknown)
785 785 for new, old in renames.iteritems():
786 786 wctx.copy(old, new)
787 787
788 788 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
789 789 """Update the dirstate to reflect the intent of copying src to dst. For
790 790 different reasons it might not end with dst being marked as copied from src.
791 791 """
792 792 origsrc = repo.dirstate.copied(src) or src
793 793 if dst == origsrc: # copying back a copy?
794 794 if repo.dirstate[dst] not in 'mn' and not dryrun:
795 795 repo.dirstate.normallookup(dst)
796 796 else:
797 797 if repo.dirstate[origsrc] == 'a' and origsrc == src:
798 798 if not ui.quiet:
799 799 ui.warn(_("%s has not been committed yet, so no copy "
800 800 "data will be stored for %s.\n")
801 801 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
802 802 if repo.dirstate[dst] in '?r' and not dryrun:
803 803 wctx.add([dst])
804 804 elif not dryrun:
805 805 wctx.copy(origsrc, dst)
806 806
807 807 def readrequires(opener, supported):
808 808 '''Reads and parses .hg/requires and checks if all entries found
809 809 are in the list of supported features.'''
810 810 requirements = set(opener.read("requires").splitlines())
811 811 missings = []
812 812 for r in requirements:
813 813 if r not in supported:
814 814 if not r or not r[0].isalnum():
815 815 raise error.RequirementError(_(".hg/requires file is corrupt"))
816 816 missings.append(r)
817 817 missings.sort()
818 818 if missings:
819 819 raise error.RequirementError(
820 820 _("repository requires features unknown to this Mercurial: %s")
821 821 % " ".join(missings),
822 822 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
823 823 " for more information"))
824 824 return requirements
825 825
826 826 def writerequires(opener, requirements):
827 827 with opener('requires', 'w') as fp:
828 828 for r in sorted(requirements):
829 829 fp.write("%s\n" % r)
830 830
831 831 class filecachesubentry(object):
832 832 def __init__(self, path, stat):
833 833 self.path = path
834 834 self.cachestat = None
835 835 self._cacheable = None
836 836
837 837 if stat:
838 838 self.cachestat = filecachesubentry.stat(self.path)
839 839
840 840 if self.cachestat:
841 841 self._cacheable = self.cachestat.cacheable()
842 842 else:
843 843 # None means we don't know yet
844 844 self._cacheable = None
845 845
846 846 def refresh(self):
847 847 if self.cacheable():
848 848 self.cachestat = filecachesubentry.stat(self.path)
849 849
850 850 def cacheable(self):
851 851 if self._cacheable is not None:
852 852 return self._cacheable
853 853
854 854 # we don't know yet, assume it is for now
855 855 return True
856 856
857 857 def changed(self):
858 858 # no point in going further if we can't cache it
859 859 if not self.cacheable():
860 860 return True
861 861
862 862 newstat = filecachesubentry.stat(self.path)
863 863
864 864 # we may not know if it's cacheable yet, check again now
865 865 if newstat and self._cacheable is None:
866 866 self._cacheable = newstat.cacheable()
867 867
868 868 # check again
869 869 if not self._cacheable:
870 870 return True
871 871
872 872 if self.cachestat != newstat:
873 873 self.cachestat = newstat
874 874 return True
875 875 else:
876 876 return False
877 877
878 878 @staticmethod
879 879 def stat(path):
880 880 try:
881 881 return util.cachestat(path)
882 882 except OSError as e:
883 883 if e.errno != errno.ENOENT:
884 884 raise
885 885
886 886 class filecacheentry(object):
887 887 def __init__(self, paths, stat=True):
888 888 self._entries = []
889 889 for path in paths:
890 890 self._entries.append(filecachesubentry(path, stat))
891 891
892 892 def changed(self):
893 893 '''true if any entry has changed'''
894 894 for entry in self._entries:
895 895 if entry.changed():
896 896 return True
897 897 return False
898 898
899 899 def refresh(self):
900 900 for entry in self._entries:
901 901 entry.refresh()
902 902
903 903 class filecache(object):
904 904 '''A property like decorator that tracks files under .hg/ for updates.
905 905
906 906 Records stat info when called in _filecache.
907 907
908 908 On subsequent calls, compares old stat info with new info, and recreates the
909 909 object when any of the files changes, updating the new stat info in
910 910 _filecache.
911 911
912 912 Mercurial either atomically renames or appends to files under .hg,
913 913 so to ensure the cache is reliable we need the filesystem to be able
914 914 to tell us if a file has been replaced. If it can't, we fall back to
915 915 recreating the object on every call (essentially the same behavior as
916 916 propertycache).
917 917
918 918 '''
919 919 def __init__(self, *paths):
920 920 self.paths = paths
921 921
922 922 def join(self, obj, fname):
923 923 """Used to compute the runtime path of a cached file.
924 924
925 925 Users should subclass filecache and provide their own version of this
926 926 function to call the appropriate join function on 'obj' (an instance
927 927 of the class whose member function was decorated).
928 928 """
929 929 raise NotImplementedError
930 930
931 931 def __call__(self, func):
932 932 self.func = func
933 933 self.name = func.__name__.encode('ascii')
934 934 return self
935 935
936 936 def __get__(self, obj, type=None):
937 937 # if accessed on the class, return the descriptor itself.
938 938 if obj is None:
939 939 return self
940 940 # do we need to check if the file changed?
941 941 if self.name in obj.__dict__:
942 942 assert self.name in obj._filecache, self.name
943 943 return obj.__dict__[self.name]
944 944
945 945 entry = obj._filecache.get(self.name)
946 946
947 947 if entry:
948 948 if entry.changed():
949 949 entry.obj = self.func(obj)
950 950 else:
951 951 paths = [self.join(obj, path) for path in self.paths]
952 952
953 953 # We stat -before- creating the object so our cache doesn't lie if
954 954 # a writer modified it between the read and the stat
955 955 entry = filecacheentry(paths, True)
956 956 entry.obj = self.func(obj)
957 957
958 958 obj._filecache[self.name] = entry
959 959
960 960 obj.__dict__[self.name] = entry.obj
961 961 return entry.obj
962 962
963 963 def __set__(self, obj, value):
964 964 if self.name not in obj._filecache:
965 965 # we add an entry for the missing value because X in __dict__
966 966 # implies X in _filecache
967 967 paths = [self.join(obj, path) for path in self.paths]
968 968 ce = filecacheentry(paths, False)
969 969 obj._filecache[self.name] = ce
970 970 else:
971 971 ce = obj._filecache[self.name]
972 972
973 973 ce.obj = value # update cached copy
974 974 obj.__dict__[self.name] = value # update copy returned by obj.x
975 975
976 976 def __delete__(self, obj):
977 977 try:
978 978 del obj.__dict__[self.name]
979 979 except KeyError:
980 980 raise AttributeError(self.name)
981 981
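A sketch of how a filecache subclass might be wired up; `statecache` and
the repo-like object below are hypothetical, not part of this change:

    class statecache(filecache):
        def join(self, obj, fname):
            # resolve the tracked file against the object's vfs
            return obj.vfs.join(fname)

    class repolike(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}  # required by the descriptor above

        @statecache('bookmarks')
        def bookmarks(self):
            # recomputed only when .hg/bookmarks changes on disk
            return self.vfs.tryread('bookmarks').splitlines()
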
982 982 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
983 983 if lock is None:
984 984 raise error.LockInheritanceContractViolation(
985 985 'lock can only be inherited while held')
986 986 if environ is None:
987 987 environ = {}
988 988 with lock.inherit() as locker:
989 989 environ[envvar] = locker
990 990 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
991 991
992 992 def wlocksub(repo, cmd, *args, **kwargs):
993 993 """run cmd as a subprocess that allows inheriting repo's wlock
994 994
995 995 This can only be called while the wlock is held. This takes all the
996 996 arguments that ui.system does, and returns the exit code of the
997 997 subprocess."""
998 998 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
999 999 **kwargs)
1000 1000
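A hedged sketch of the intended call pattern; `fix-perms` stands in for
any external command that should reuse the already-held wlock:

    with repo.wlock():
        # the child process sees HG_WLOCK_LOCKER and may inherit the lock
        rc = wlocksub(repo, 'fix-perms .')
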
1001 1001 def gdinitconfig(ui):
1002 1002 """helper function to know if a repo should be created as general delta
1003 1003 """
1004 1004 # experimental config: format.generaldelta
1005 1005 return (ui.configbool('format', 'generaldelta')
1006 1006 or ui.configbool('format', 'usegeneraldelta'))
1007 1007
1008 1008 def gddeltaconfig(ui):
1009 1009 """helper function to know if incoming delta should be optimised
1010 1010 """
1011 1011 # experimental config: format.generaldelta
1012 1012 return ui.configbool('format', 'generaldelta')
1013 1013
1014 1014 class simplekeyvaluefile(object):
1015 1015 """A simple file with key=value lines
1016 1016
1017 1017 Keys must be alphanumeric and start with a letter; values must not
1018 1018 contain '\n' characters"""
1019 1019 firstlinekey = '__firstline'
1020 1020
1021 1021 def __init__(self, vfs, path, keys=None):
1022 1022 self.vfs = vfs
1023 1023 self.path = path
1024 1024
1025 1025 def read(self, firstlinenonkeyval=False):
1026 1026 """Read the contents of a simple key-value file
1027 1027
1028 1028 'firstlinenonkeyval' indicates whether the first line of the file should
1029 1029 be treated as a key-value pair or returned fully under the
1030 1030 __firstline key."""
1031 1031 lines = self.vfs.readlines(self.path)
1032 1032 d = {}
1033 1033 if firstlinenonkeyval:
1034 1034 if not lines:
1035 1035 e = _("empty simplekeyvalue file")
1036 1036 raise error.CorruptedState(e)
1037 1037 # we don't want to include '\n' in the __firstline
1038 1038 d[self.firstlinekey] = lines[0][:-1]
1039 1039 del lines[0]
1040 1040
1041 1041 try:
1042 1042 # the 'if line.strip()' part prevents us from failing on empty
1043 1043 # lines, which only contain '\n' and therefore are not skipped
1044 1044 # by 'if line'
1045 1045 updatedict = dict(line[:-1].split('=', 1) for line in lines
1046 1046 if line.strip())
1047 1047 if self.firstlinekey in updatedict:
1048 1048 e = _("%r can't be used as a key")
1049 1049 raise error.CorruptedState(e % self.firstlinekey)
1050 1050 d.update(updatedict)
1051 1051 except ValueError as e:
1052 1052 raise error.CorruptedState(str(e))
1053 1053 return d
1054 1054
1055 1055 def write(self, data, firstline=None):
1056 1056 """Write key=>value mapping to a file
1057 1057 data is a dict. Keys must be alphanumeric and start with a letter.
1058 1058 Values must not contain newline characters.
1059 1059
1060 1060 If 'firstline' is not None, it is written to the file before
1061 1061 everything else, verbatim, not in key=value form
1062 1062 lines = []
1063 1063 if firstline is not None:
1064 1064 lines.append('%s\n' % firstline)
1065 1065
1066 1066 for k, v in data.items():
1067 1067 if k == self.firstlinekey:
1068 1068 e = "key name '%s' is reserved" % self.firstlinekey
1069 1069 raise error.ProgrammingError(e)
1070 1070 if not k[0].isalpha():
1071 1071 e = "keys must start with a letter in a key-value file"
1072 1072 raise error.ProgrammingError(e)
1073 1073 if not k.isalnum():
1074 1074 e = "invalid key name in a simple key-value file"
1075 1075 raise error.ProgrammingError(e)
1076 1076 if '\n' in v:
1077 1077 e = "invalid value in a simple key-value file"
1078 1078 raise error.ProgrammingError(e)
1079 1079 lines.append("%s=%s\n" % (k, v))
1080 1080 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1081 1081 fp.write(''.join(lines))
1082 1082
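A minimal round-trip sketch, assuming `vfs` points at a writable
directory; the file name and keys are illustrative:

    kvfile = simplekeyvaluefile(vfs, 'state')
    kvfile.write({'version': '2', 'name': 'default'}, firstline='2')
    d = kvfile.read(firstlinenonkeyval=True)
    # d == {'__firstline': '2', 'version': '2', 'name': 'default'}
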
1083 1083 _reportobsoletedsource = [
1084 1084 'debugobsolete',
1085 1085 'pull',
1086 1086 'push',
1087 1087 'serve',
1088 1088 'unbundle',
1089 1089 ]
1090 1090
1091 1091 def registersummarycallback(repo, otr, txnname=''):
1092 1092 """register a callback to issue a summary after the transaction is closed
1093 1093 """
1094 1094 for source in _reportobsoletedsource:
1095 1095 if txnname.startswith(source):
1096 1096 reporef = weakref.ref(repo)
1097 1097 def reportsummary(tr):
1098 1098 """the actual callback reporting the summary"""
1099 1099 repo = reporef()
1100 1100 obsoleted = obsutil.getobsoleted(repo, tr)
1101 1101 if obsoleted:
1102 1102 repo.ui.status(_('obsoleted %i changesets\n')
1103 1103 % len(obsoleted))
1104 1104 otr.addpostclose('00-txnreport', reportsummary)
1105 1105 break
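A sketch of the expected call site, assuming `tr` is a transaction whose
name starts with one of the sources listed above:

    tr = repo.transaction('unbundle')
    try:
        registersummarycallback(repo, tr, txnname='unbundle')
        # ... apply changes; on tr.close() the '00-txnreport' callback
        # prints 'obsoleted N changesets' if anything was obsoleted ...
        tr.close()
    finally:
        tr.release()
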
@@ -1,642 +1,647
1 1 # vfs.py - Mercurial 'vfs' classes
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import contextlib
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import stat
14 14 import tempfile
15 15 import threading
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pathutil,
21 21 pycompat,
22 22 util,
23 23 )
24 24
25 25 def _avoidambig(path, oldstat):
26 26 """Avoid file stat ambiguity forcibly
27 27
28 28 This function copies the ``path`` file if it is owned by another
29 29 user (see issue5418 and issue5584 for details).
30 30 """
31 31 def checkandavoid():
32 32 newstat = util.filestat.frompath(path)
33 33 # return whether file stat ambiguity is (already) avoided
34 34 return (not newstat.isambig(oldstat) or
35 35 newstat.avoidambig(path, oldstat))
36 36 if not checkandavoid():
37 37 # simply copy to change owner of path to get privilege to
38 38 # advance mtime (see issue5418)
39 39 util.rename(util.mktempcopy(path), path)
40 40 checkandavoid()
41 41
42 42 class abstractvfs(object):
43 43 """Abstract base class; cannot be instantiated"""
44 44
45 45 def __init__(self, *args, **kwargs):
46 46 '''Prevent instantiation; don't call this from subclasses.'''
47 47 raise NotImplementedError('attempted instantiating ' + str(type(self)))
48 48
49 49 def tryread(self, path):
50 50 '''gracefully return an empty string for missing files'''
51 51 try:
52 52 return self.read(path)
53 53 except IOError as inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 return ""
57 57
58 58 def tryreadlines(self, path, mode='rb'):
59 59 '''gracefully return an empty array for missing files'''
60 60 try:
61 61 return self.readlines(path, mode=mode)
62 62 except IOError as inst:
63 63 if inst.errno != errno.ENOENT:
64 64 raise
65 65 return []
66 66
67 67 @util.propertycache
68 68 def open(self):
69 69 '''Open ``path`` file, which is relative to vfs root.
70 70
71 71 Newly created directories are marked as "not to be indexed by
72 72 the content indexing service", if ``notindexed`` is specified
73 73 for "write" mode access.
74 74 '''
75 75 return self.__call__
76 76
77 77 def read(self, path):
78 78 with self(path, 'rb') as fp:
79 79 return fp.read()
80 80
81 81 def readlines(self, path, mode='rb'):
82 82 with self(path, mode=mode) as fp:
83 83 return fp.readlines()
84 84
85 85 def write(self, path, data, backgroundclose=False):
86 86 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
87 87 return fp.write(data)
88 88
89 89 def writelines(self, path, data, mode='wb', notindexed=False):
90 90 with self(path, mode=mode, notindexed=notindexed) as fp:
91 91 return fp.writelines(data)
92 92
93 93 def append(self, path, data):
94 94 with self(path, 'ab') as fp:
95 95 return fp.write(data)
96 96
97 97 def basename(self, path):
98 98 """return base element of a path (as os.path.basename would do)
99 99
100 100 This exists to allow handling of strange encoding if needed."""
101 101 return os.path.basename(path)
102 102
103 103 def chmod(self, path, mode):
104 104 return os.chmod(self.join(path), mode)
105 105
106 106 def dirname(self, path):
107 107 """return dirname element of a path (as os.path.dirname would do)
108 108
109 109 This exists to allow handling of strange encoding if needed."""
110 110 return os.path.dirname(path)
111 111
112 112 def exists(self, path=None):
113 113 return os.path.exists(self.join(path))
114 114
115 115 def fstat(self, fp):
116 116 return util.fstat(fp)
117 117
118 118 def isdir(self, path=None):
119 119 return os.path.isdir(self.join(path))
120 120
121 121 def isfile(self, path=None):
122 122 return os.path.isfile(self.join(path))
123 123
124 124 def islink(self, path=None):
125 125 return os.path.islink(self.join(path))
126 126
127 127 def isfileorlink(self, path=None):
128 128 '''return whether path is a regular file or a symlink
129 129
130 130 Unlike isfile, this doesn't follow symlinks.'''
131 131 try:
132 132 st = self.lstat(path)
133 133 except OSError:
134 134 return False
135 135 mode = st.st_mode
136 136 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
137 137
138 138 def reljoin(self, *paths):
139 139 """join various elements of a path together (as os.path.join would do)
140 140
141 141 The vfs base is not injected so that paths stay relative. This exists
142 142 to allow handling of strange encoding if needed."""
143 143 return os.path.join(*paths)
144 144
145 145 def split(self, path):
146 146 """split top-most element of a path (as os.path.split would do)
147 147
148 148 This exists to allow handling of strange encoding if needed."""
149 149 return os.path.split(path)
150 150
151 151 def lexists(self, path=None):
152 152 return os.path.lexists(self.join(path))
153 153
154 154 def lstat(self, path=None):
155 155 return os.lstat(self.join(path))
156 156
157 157 def listdir(self, path=None):
158 158 return os.listdir(self.join(path))
159 159
160 160 def makedir(self, path=None, notindexed=True):
161 161 return util.makedir(self.join(path), notindexed)
162 162
163 163 def makedirs(self, path=None, mode=None):
164 164 return util.makedirs(self.join(path), mode)
165 165
166 166 def makelock(self, info, path):
167 167 return util.makelock(info, self.join(path))
168 168
169 169 def mkdir(self, path=None):
170 170 return os.mkdir(self.join(path))
171 171
172 172 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
173 173 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
174 174 dir=self.join(dir), text=text)
175 175 dname, fname = util.split(name)
176 176 if dir:
177 177 return fd, os.path.join(dir, fname)
178 178 else:
179 179 return fd, fname
180 180
181 181 def readdir(self, path=None, stat=None, skip=None):
182 182 return util.listdir(self.join(path), stat, skip)
183 183
184 184 def readlock(self, path):
185 185 return util.readlock(self.join(path))
186 186
187 187 def rename(self, src, dst, checkambig=False):
188 188 """Rename from src to dst
189 189
190 190 checkambig argument is used with util.filestat, and is useful
191 191 only if destination file is guarded by any lock
192 192 (e.g. repo.lock or repo.wlock).
193 193
194 194 To forcibly avoid file stat ambiguity, checkambig=True involves
195 195 copying the ``src`` file if it is owned by another user. Therefore,
196 196 use checkambig=True only in limited cases (see also issue5418 and
197 197 issue5584 for details).
198 198 """
199 199 srcpath = self.join(src)
200 200 dstpath = self.join(dst)
201 201 oldstat = checkambig and util.filestat.frompath(dstpath)
202 202 if oldstat and oldstat.stat:
203 203 ret = util.rename(srcpath, dstpath)
204 204 _avoidambig(dstpath, oldstat)
205 205 return ret
206 206 return util.rename(srcpath, dstpath)
207 207
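A hedged sketch of when checkambig matters, assuming the destination file
is guarded by repo.lock and re-read through an mtime-keyed cache; the
file names are illustrative:

    with repo.lock():
        vfs.rename('journal', 'undo', checkambig=True)
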
208 208 def readlink(self, path):
209 209 return os.readlink(self.join(path))
210 210
211 211 def removedirs(self, path=None):
212 212 """Remove a leaf directory and all empty intermediate ones
213 213 """
214 214 return util.removedirs(self.join(path))
215 215
216 216 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
217 217 """Remove a directory tree recursively
218 218
219 219 If ``forcibly``, this tries to remove READ-ONLY files, too.
220 220 """
221 221 if forcibly:
222 222 def onerror(function, path, excinfo):
223 223 if function is not os.remove:
224 224 raise
225 225 # read-only files cannot be unlinked under Windows
226 226 s = os.stat(path)
227 227 if (s.st_mode & stat.S_IWRITE) != 0:
228 228 raise
229 229 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
230 230 os.remove(path)
231 231 else:
232 232 onerror = None
233 233 return shutil.rmtree(self.join(path),
234 234 ignore_errors=ignore_errors, onerror=onerror)
235 235
236 236 def setflags(self, path, l, x):
237 237 return util.setflags(self.join(path), l, x)
238 238
239 239 def stat(self, path=None):
240 240 return os.stat(self.join(path))
241 241
242 242 def unlink(self, path=None):
243 243 return util.unlink(self.join(path))
244 244
245 245 def tryunlink(self, path=None):
246 246 """Attempt to remove a file, ignoring missing file errors."""
247 247 util.tryunlink(self.join(path))
248 248
249 249 def unlinkpath(self, path=None, ignoremissing=False):
250 250 return util.unlinkpath(self.join(path), ignoremissing=ignoremissing)
251 251
252 252 def utime(self, path=None, t=None):
253 253 return os.utime(self.join(path), t)
254 254
255 255 def walk(self, path=None, onerror=None):
256 256 """Yield (dirpath, dirs, files) tuple for each directories under path
257 257
258 258 ``dirpath`` is relative to the root of this vfs. This
259 259 uses ``os.sep`` as the path separator, even if you specify a POSIX
260 260 style ``path``.
261 261
262 262 "The root of this vfs" is represented as empty ``dirpath``.
263 263 """
264 264 root = os.path.normpath(self.join(None))
265 265 # when dirpath == root, dirpath[prefixlen:] becomes empty
266 266 # because len(dirpath) < prefixlen.
267 267 prefixlen = len(pathutil.normasprefix(root))
268 268 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
269 269 yield (dirpath[prefixlen:], dirs, files)
270 270
271 271 @contextlib.contextmanager
272 272 def backgroundclosing(self, ui, expectedcount=-1):
273 273 """Allow files to be closed asynchronously.
274 274
275 275 When this context manager is active, ``backgroundclose`` can be passed
276 276 to ``__call__``/``open`` to result in the file possibly being closed
277 277 asynchronously, on a background thread.
278 278 """
279 279 # This is an arbitrary restriction and could be changed if we ever
280 280 # have a use case.
281 281 vfs = getattr(self, 'vfs', self)
282 282 if getattr(vfs, '_backgroundfilecloser', None):
283 283 raise error.Abort(
284 284 _('can only have 1 active background file closer'))
285 285
286 286 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
287 287 try:
288 288 vfs._backgroundfilecloser = bfc
289 289 yield bfc
290 290 finally:
291 291 vfs._backgroundfilecloser = None
292 292
293 293 class vfs(abstractvfs):
294 294 '''Operate files relative to a base directory
295 295
296 296 This class is used to hide the details of COW semantics and
297 297 remote file access from higher level code.
298
299 'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
300 (b) the base directory is managed by hg and considered sort-of append-only.
301 See pathutil.pathauditor() for details.
298 302 '''
299 def __init__(self, base, audit=True, expandpath=False, realpath=False):
303 def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
304 realpath=False):
300 305 if expandpath:
301 306 base = util.expandpath(base)
302 307 if realpath:
303 308 base = os.path.realpath(base)
304 309 self.base = base
305 310 self._audit = audit
306 311 if audit:
307 self.audit = pathutil.pathauditor(self.base)
312 self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
308 313 else:
309 314 self.audit = (lambda path, mode=None: True)
310 315 self.createmode = None
311 316 self._trustnlink = None
312 317
313 318 @util.propertycache
314 319 def _cansymlink(self):
315 320 return util.checklink(self.base)
316 321
317 322 @util.propertycache
318 323 def _chmod(self):
319 324 return util.checkexec(self.base)
320 325
321 326 def _fixfilemode(self, name):
322 327 if self.createmode is None or not self._chmod:
323 328 return
324 329 os.chmod(name, self.createmode & 0o666)
325 330
326 331 def __call__(self, path, mode="r", text=False, atomictemp=False,
327 332 notindexed=False, backgroundclose=False, checkambig=False,
328 333 auditpath=True):
329 334 '''Open ``path`` file, which is relative to vfs root.
330 335
331 336 Newly created directories are marked as "not to be indexed by
332 337 the content indexing service", if ``notindexed`` is specified
333 338 for "write" mode access.
334 339
335 340 If ``backgroundclose`` is passed, the file may be closed asynchronously.
336 341 It can only be used if the ``self.backgroundclosing()`` context manager
337 342 is active. This should only be specified if the following criteria hold:
338 343
339 344 1. There is a potential for writing thousands of files. Unless you
340 345 are writing thousands of files, the performance benefits of
341 346 asynchronously closing files are not realized.
342 347 2. Files are opened exactly once for the ``backgroundclosing``
343 348 active duration and are therefore free of race conditions between
344 349 closing a file on a background thread and reopening it. (If the
345 350 file were opened multiple times, there could be unflushed data
346 351 because the original file handle hasn't been flushed/closed yet.)
347 352
348 353 ``checkambig`` argument is passed to atomictempfile (valid
349 354 only for writing), and is useful only if target file is
350 355 guarded by any lock (e.g. repo.lock or repo.wlock).
351 356
352 357 To forcibly avoid file stat ambiguity, checkambig=True involves
353 358 copying the ``path`` file opened in "append" mode (e.g. for
354 359 truncation) if it is owned by another user. Therefore, use the
355 360 combination of append mode and checkambig=True only in limited
356 361 cases (see also issue5418 and issue5584 for details).
357 362 '''
358 363 if auditpath:
359 364 if self._audit:
360 365 r = util.checkosfilename(path)
361 366 if r:
362 367 raise error.Abort("%s: %r" % (r, path))
363 368 self.audit(path, mode=mode)
364 369 f = self.join(path)
365 370
366 371 if not text and "b" not in mode:
367 372 mode += "b" # for that other OS
368 373
369 374 nlink = -1
370 375 if mode not in ('r', 'rb'):
371 376 dirname, basename = util.split(f)
372 377 # If basename is empty, then the path is malformed because it points
373 378 # to a directory. Let the posixfile() call below raise IOError.
374 379 if basename:
375 380 if atomictemp:
376 381 util.makedirs(dirname, self.createmode, notindexed)
377 382 return util.atomictempfile(f, mode, self.createmode,
378 383 checkambig=checkambig)
379 384 try:
380 385 if 'w' in mode:
381 386 util.unlink(f)
382 387 nlink = 0
383 388 else:
384 389 # nlinks() may behave differently for files on Windows
385 390 # shares if the file is open.
386 391 with util.posixfile(f):
387 392 nlink = util.nlinks(f)
388 393 if nlink < 1:
389 394 nlink = 2 # force mktempcopy (issue1922)
390 395 except (OSError, IOError) as e:
391 396 if e.errno != errno.ENOENT:
392 397 raise
393 398 nlink = 0
394 399 util.makedirs(dirname, self.createmode, notindexed)
395 400 if nlink > 0:
396 401 if self._trustnlink is None:
397 402 self._trustnlink = nlink > 1 or util.checknlink(f)
398 403 if nlink > 1 or not self._trustnlink:
399 404 util.rename(util.mktempcopy(f), f)
400 405 fp = util.posixfile(f, mode)
401 406 if nlink == 0:
402 407 self._fixfilemode(f)
403 408
404 409 if checkambig:
405 410 if mode in ('r', 'rb'):
406 411 raise error.Abort(_('implementation error: mode %s is not'
407 412 ' valid for checkambig=True') % mode)
408 413 fp = checkambigatclosing(fp)
409 414
410 415 if backgroundclose:
411 416 if not self._backgroundfilecloser:
412 417 raise error.Abort(_('backgroundclose can only be used when a '
413 418 'backgroundclosing context manager is active')
414 419 )
415 420
416 421 fp = delayclosedfile(fp, self._backgroundfilecloser)
417 422
418 423 return fp
419 424
420 425 def symlink(self, src, dst):
421 426 self.audit(dst)
422 427 linkname = self.join(dst)
423 428 util.tryunlink(linkname)
424 429
425 430 util.makedirs(os.path.dirname(linkname), self.createmode)
426 431
427 432 if self._cansymlink:
428 433 try:
429 434 os.symlink(src, linkname)
430 435 except OSError as err:
431 436 raise OSError(err.errno, _('could not symlink to %r: %s') %
432 437 (src, err.strerror), linkname)
433 438 else:
434 439 self.write(dst, src)
435 440
436 441 def join(self, path, *insidef):
437 442 if path:
438 443 return os.path.join(self.base, path, *insidef)
439 444 else:
440 445 return self.base
441 446
442 447 opener = vfs
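A sketch of the two configurations this change distinguishes; the paths
are illustrative:

    # working directory: contents change freely, so audit results are
    # not cached (the new default)
    wvfs = vfs('/path/to/repo', audit=True)
    # .hg/store: managed by hg and treated as sort-of append-only, so
    # caching audited paths is safe here
    svfs = vfs('/path/to/repo/.hg/store', cacheaudited=True)
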
443 448
444 449 class proxyvfs(object):
445 450 def __init__(self, vfs):
446 451 self.vfs = vfs
447 452
448 453 @property
449 454 def options(self):
450 455 return self.vfs.options
451 456
452 457 @options.setter
453 458 def options(self, value):
454 459 self.vfs.options = value
455 460
456 461 class filtervfs(abstractvfs, proxyvfs):
457 462 '''Wrapper vfs for filtering filenames with a function.'''
458 463
459 464 def __init__(self, vfs, filter):
460 465 proxyvfs.__init__(self, vfs)
461 466 self._filter = filter
462 467
463 468 def __call__(self, path, *args, **kwargs):
464 469 return self.vfs(self._filter(path), *args, **kwargs)
465 470
466 471 def join(self, path, *insidef):
467 472 if path:
468 473 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
469 474 else:
470 475 return self.vfs.join(path)
471 476
472 477 filteropener = filtervfs
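A minimal sketch of a filtering wrapper; the lowercasing transform is
only an example:

    inner = vfs('/path/to/repo/.hg')
    lowered = filtervfs(inner, lambda p: p.lower())
    lowered.write('FOO', 'data')  # actually writes .hg/foo
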
473 478
474 479 class readonlyvfs(abstractvfs, proxyvfs):
475 480 '''Wrapper vfs preventing any writing.'''
476 481
477 482 def __init__(self, vfs):
478 483 proxyvfs.__init__(self, vfs)
479 484
480 485 def __call__(self, path, mode='r', *args, **kw):
481 486 if mode not in ('r', 'rb'):
482 487 raise error.Abort(_('this vfs is read only'))
483 488 return self.vfs(path, mode, *args, **kw)
484 489
485 490 def join(self, path, *insidef):
486 491 return self.vfs.join(path, *insidef)
487 492
488 493 class closewrapbase(object):
489 494 """Base class of wrapper, which hooks closing
490 495
491 496 Do not instantiate outside of the vfs layer.
492 497 """
493 498 def __init__(self, fh):
494 499 object.__setattr__(self, r'_origfh', fh)
495 500
496 501 def __getattr__(self, attr):
497 502 return getattr(self._origfh, attr)
498 503
499 504 def __setattr__(self, attr, value):
500 505 return setattr(self._origfh, attr, value)
501 506
502 507 def __delattr__(self, attr):
503 508 return delattr(self._origfh, attr)
504 509
505 510 def __enter__(self):
506 511 return self._origfh.__enter__()
507 512
508 513 def __exit__(self, exc_type, exc_value, exc_tb):
509 514 raise NotImplementedError('attempted instantiating ' + str(type(self)))
510 515
511 516 def close(self):
512 517 raise NotImplementedError('attempted instantiating ' + str(type(self)))
513 518
514 519 class delayclosedfile(closewrapbase):
515 520 """Proxy for a file object whose close is delayed.
516 521
517 522 Do not instantiate outside of the vfs layer.
518 523 """
519 524 def __init__(self, fh, closer):
520 525 super(delayclosedfile, self).__init__(fh)
521 526 object.__setattr__(self, r'_closer', closer)
522 527
523 528 def __exit__(self, exc_type, exc_value, exc_tb):
524 529 self._closer.close(self._origfh)
525 530
526 531 def close(self):
527 532 self._closer.close(self._origfh)
528 533
529 534 class backgroundfilecloser(object):
530 535 """Coordinates background closing of file handles on multiple threads."""
531 536 def __init__(self, ui, expectedcount=-1):
532 537 self._running = False
533 538 self._entered = False
534 539 self._threads = []
535 540 self._threadexception = None
536 541
537 542 # Only Windows/NTFS has slow file closing. So only enable by default
538 543 # on that platform. But allow to be enabled elsewhere for testing.
539 544 defaultenabled = pycompat.osname == 'nt'
540 545 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
541 546
542 547 if not enabled:
543 548 return
544 549
545 550 # There is overhead to starting and stopping the background threads.
546 551 # Don't do background processing unless the file count is large enough
547 552 # to justify it.
548 553 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
549 554 # FUTURE dynamically start background threads after minfilecount closes.
550 555 # (We don't currently have any callers that don't know their file count)
551 556 if expectedcount > 0 and expectedcount < minfilecount:
552 557 return
553 558
554 559 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
555 560 threadcount = ui.configint('worker', 'backgroundclosethreadcount')
556 561
557 562 ui.debug('starting %d threads for background file closing\n' %
558 563 threadcount)
559 564
560 565 self._queue = util.queue(maxsize=maxqueue)
561 566 self._running = True
562 567
563 568 for i in range(threadcount):
564 569 t = threading.Thread(target=self._worker, name='backgroundcloser')
565 570 self._threads.append(t)
566 571 t.start()
567 572
568 573 def __enter__(self):
569 574 self._entered = True
570 575 return self
571 576
572 577 def __exit__(self, exc_type, exc_value, exc_tb):
573 578 self._running = False
574 579
575 580 # Wait for threads to finish closing so open files don't linger for
576 581 # longer than lifetime of context manager.
577 582 for t in self._threads:
578 583 t.join()
579 584
580 585 def _worker(self):
581 586 """Main routine for worker thread."""
582 587 while True:
583 588 try:
584 589 fh = self._queue.get(block=True, timeout=0.100)
585 590 # Need to catch exceptions, or the thread will terminate and
586 591 # we could orphan file descriptors.
587 592 try:
588 593 fh.close()
589 594 except Exception as e:
590 595 # Stash so can re-raise from main thread later.
591 596 self._threadexception = e
592 597 except util.empty:
593 598 if not self._running:
594 599 break
595 600
596 601 def close(self, fh):
597 602 """Schedule a file for closing."""
598 603 if not self._entered:
599 604 raise error.Abort(_('can only call close() when context manager '
600 605 'active'))
601 606
602 607 # If a background thread encountered an exception, raise now so we fail
603 608 # fast. Otherwise we may potentially go on for minutes until the error
604 609 # is acted on.
605 610 if self._threadexception:
606 611 e = self._threadexception
607 612 self._threadexception = None
608 613 raise e
609 614
610 615 # If we're not actively running, close synchronously.
611 616 if not self._running:
612 617 fh.close()
613 618 return
614 619
615 620 self._queue.put(fh, block=True, timeout=None)
616 621
617 622 class checkambigatclosing(closewrapbase):
618 623 """Proxy for a file object, to avoid ambiguity of file stat
619 624
620 625 See also util.filestat for details about "ambiguity of file stat".
621 626
622 627 This proxy is useful only if the target file is guarded by any
623 628 lock (e.g. repo.lock or repo.wlock)
624 629
625 630 Do not instantiate outside of the vfs layer.
626 631 """
627 632 def __init__(self, fh):
628 633 super(checkambigatclosing, self).__init__(fh)
629 634 object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
630 635
631 636 def _checkambig(self):
632 637 oldstat = self._oldstat
633 638 if oldstat.stat:
634 639 _avoidambig(self._origfh.name, oldstat)
635 640
636 641 def __exit__(self, exc_type, exc_value, exc_tb):
637 642 self._origfh.__exit__(exc_type, exc_value, exc_tb)
638 643 self._checkambig()
639 644
640 645 def close(self):
641 646 self._origfh.close()
642 647 self._checkambig()
@@ -1,232 +1,231
1 1 $ hg init
2 2
3 3 audit of .hg
4 4
5 5 $ hg add .hg/00changelog.i
6 6 abort: path contains illegal component: .hg/00changelog.i (glob)
7 7 [255]
8 8
9 9 #if symlink
10 10
11 11 Symlinks
12 12
13 13 $ mkdir a
14 14 $ echo a > a/a
15 15 $ hg ci -Ama
16 16 adding a/a
17 17 $ ln -s a b
18 18 $ echo b > a/b
19 19 $ hg add b/b
20 20 abort: path 'b/b' traverses symbolic link 'b' (glob)
21 21 [255]
22 22 $ hg add b
23 23
24 24 should still fail - maybe
25 25
26 26 $ hg add b/b
27 27 abort: path 'b/b' traverses symbolic link 'b' (glob)
28 28 [255]
29 29
30 30 $ hg commit -m 'add symlink b'
31 31
32 32
33 33 Test symlink traversing when accessing history:
34 34 -----------------------------------------------
35 35
36 36 (build a changeset where the path exists as a directory)
37 37
38 38 $ hg up 0
39 39 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
40 40 $ mkdir b
41 41 $ echo c > b/a
42 42 $ hg add b/a
43 43 $ hg ci -m 'add directory b'
44 44 created new head
45 45
46 46 Test that hg cat does not do anything wrong when the working copy has 'b' as a directory
47 47
48 48 $ hg cat b/a
49 49 c
50 50 $ hg cat -r "desc(directory)" b/a
51 51 c
52 52 $ hg cat -r "desc(symlink)" b/a
53 53 b/a: no such file in rev bc151a1f53bd
54 54 [1]
55 55
56 56 Test that hg cat does not do anything wrong when the working copy has 'b' as a symlink (issue4749)
57 57
58 58 $ hg up 'desc(symlink)'
59 59 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
60 60 $ hg cat b/a
61 61 b/a: no such file in rev bc151a1f53bd
62 62 [1]
63 63 $ hg cat -r "desc(directory)" b/a
64 64 c
65 65 $ hg cat -r "desc(symlink)" b/a
66 66 b/a: no such file in rev bc151a1f53bd
67 67 [1]
68 68
69 69 #endif
70 70
71 71
72 72 unbundle tampered bundle
73 73
74 74 $ hg init target
75 75 $ cd target
76 76 $ hg unbundle "$TESTDIR/bundles/tampered.hg"
77 77 adding changesets
78 78 adding manifests
79 79 adding file changes
80 80 added 5 changesets with 6 changes to 6 files (+4 heads)
81 81 (run 'hg heads' to see heads, 'hg merge' to merge)
82 82
83 83 attack .hg/test
84 84
85 85 $ hg manifest -r0
86 86 .hg/test
87 87 $ hg update -Cr0
88 88 abort: path contains illegal component: .hg/test (glob)
89 89 [255]
90 90
91 91 attack foo/.hg/test
92 92
93 93 $ hg manifest -r1
94 94 foo/.hg/test
95 95 $ hg update -Cr1
96 96 abort: path 'foo/.hg/test' is inside nested repo 'foo' (glob)
97 97 [255]
98 98
99 99 attack back/test where back symlinks to ..
100 100
101 101 $ hg manifest -r2
102 102 back
103 103 back/test
104 104 #if symlink
105 105 $ hg update -Cr2
106 106 abort: path 'back/test' traverses symbolic link 'back'
107 107 [255]
108 108 #else
109 109 ('back' will be a file and cause some other system-specific error)
110 110 $ hg update -Cr2
111 111 abort: * (glob)
112 112 [255]
113 113 #endif
114 114
115 115 attack ../test
116 116
117 117 $ hg manifest -r3
118 118 ../test
119 119 $ hg update -Cr3
120 120 abort: path contains illegal component: ../test (glob)
121 121 [255]
122 122
123 123 attack /tmp/test
124 124
125 125 $ hg manifest -r4
126 126 /tmp/test
127 127 $ hg update -Cr4
128 128 abort: path contains illegal component: /tmp/test (glob)
129 129 [255]
130 130
131 131 $ cd ..
132 132
133 133 Test symlink traversal on merge:
134 134 --------------------------------
135 135
136 136 #if symlink
137 137
138 138 set up symlink hell
139 139
140 140 $ mkdir merge-symlink-out
141 141 $ hg init merge-symlink
142 142 $ cd merge-symlink
143 143 $ touch base
144 144 $ hg commit -qAm base
145 145 $ ln -s ../merge-symlink-out a
146 146 $ hg commit -qAm 'symlink a -> ../merge-symlink-out'
147 147 $ hg up -q 0
148 148 $ mkdir a
149 149 $ touch a/poisoned
150 150 $ hg commit -qAm 'file a/poisoned'
151 151 $ hg log -G -T '{rev}: {desc}\n'
152 152 @ 2: file a/poisoned
153 153 |
154 154 | o 1: symlink a -> ../merge-symlink-out
155 155 |/
156 156 o 0: base
157 157
158 158
159 159 try trivial merge
160 160
161 161 $ hg up -qC 1
162 162 $ hg merge 2
163 163 abort: path 'a/poisoned' traverses symbolic link 'a'
164 164 [255]
165 165
166 166 try rebase onto other revision: cache of audited paths should be discarded,
167 167 and the rebase should fail (issue5628)
168 168
169 169 $ hg up -qC 2
170 170 $ hg rebase -s 2 -d 1 --config extensions.rebase=
171 171 rebasing 2:e73c21d6b244 "file a/poisoned" (tip)
172 saved backup bundle to * (glob)
172 abort: path 'a/poisoned' traverses symbolic link 'a'
173 [255]
173 174 $ ls ../merge-symlink-out
174 poisoned
175 175
176 176 $ cd ..
177 177
178 178 Test symlink traversal on update:
179 179 ---------------------------------
180 180
181 181 $ mkdir update-symlink-out
182 182 $ hg init update-symlink
183 183 $ cd update-symlink
184 184 $ ln -s ../update-symlink-out a
185 185 $ hg commit -qAm 'symlink a -> ../update-symlink-out'
186 186 $ hg rm a
187 187 $ mkdir a && touch a/b
188 188 $ hg ci -qAm 'file a/b' a/b
189 189 $ hg up -qC 0
190 190 $ hg rm a
191 191 $ mkdir a && touch a/c
192 192 $ hg ci -qAm 'rm a, file a/c'
193 193 $ hg log -G -T '{rev}: {desc}\n'
194 194 @ 2: rm a, file a/c
195 195 |
196 196 | o 1: file a/b
197 197 |/
198 198 o 0: symlink a -> ../update-symlink-out
199 199
200 200
201 201 try linear update where symlink already exists:
202 202
203 203 $ hg up -qC 0
204 204 $ hg up 1
205 205 abort: path 'a/b' traverses symbolic link 'a'
206 206 [255]
207 207
208 208 try linear update including symlinked directory and its content: paths are
209 209 audited first by calculateupdates(), where no symlink is created, so both
210 210 'a' and 'a/b' are taken as good paths. applyupdates() should still fail.
211 211
212 212 $ hg up -qC null
213 213 $ hg up 1
214 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
214 abort: path 'a/b' traverses symbolic link 'a'
215 [255]
215 216 $ ls ../update-symlink-out
216 b
217 $ rm ../update-symlink-out/b
218 217
219 218 try branch update replacing directory with symlink, and its content: the
220 219 path 'a' is audited as a directory first, and it should be audited again as
221 220 a symlink.
222 221
223 222 $ rm -f a
224 223 $ hg up -qC 2
225 224 $ hg up 1
226 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
225 abort: path 'a/b' traverses symbolic link 'a'
226 [255]
227 227 $ ls ../update-symlink-out
228 b
229 228
230 229 $ cd ..
231 230
232 231 #endif
@@ -1,987 +1,987
1 1 #if windows
2 2 $ PYTHONPATH="$TESTDIR/../contrib;$PYTHONPATH"
3 3 #else
4 4 $ PYTHONPATH="$TESTDIR/../contrib:$PYTHONPATH"
5 5 #endif
6 6 $ export PYTHONPATH
7 7
8 8 a typical client does not want echo-back messages, so test without them:
9 9
10 10 $ grep -v '^promptecho ' < $HGRCPATH >> $HGRCPATH.new
11 11 $ mv $HGRCPATH.new $HGRCPATH
12 12
13 13 $ hg init repo
14 14 $ cd repo
15 15
16 16 >>> from __future__ import print_function
17 17 >>> from hgclient import readchannel, runcommand, check
18 18 >>> @check
19 19 ... def hellomessage(server):
20 20 ... ch, data = readchannel(server)
21 21 ... print('%c, %r' % (ch, data))
22 22 ... # run an arbitrary command to make sure the next thing the server
23 23 ... # sends isn't part of the hello message
24 24 ... runcommand(server, ['id'])
25 25 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
26 26 *** runcommand id
27 27 000000000000 tip
28 28
29 29 >>> from hgclient import check
30 30 >>> @check
31 31 ... def unknowncommand(server):
32 32 ... server.stdin.write('unknowncommand\n')
33 33 abort: unknown command unknowncommand
34 34
35 35 >>> from hgclient import readchannel, runcommand, check
36 36 >>> @check
37 37 ... def checkruncommand(server):
38 38 ... # hello block
39 39 ... readchannel(server)
40 40 ...
41 41 ... # no args
42 42 ... runcommand(server, [])
43 43 ...
44 44 ... # global options
45 45 ... runcommand(server, ['id', '--quiet'])
46 46 ...
47 47 ... # make sure global options don't stick through requests
48 48 ... runcommand(server, ['id'])
49 49 ...
50 50 ... # --config
51 51 ... runcommand(server, ['id', '--config', 'ui.quiet=True'])
52 52 ...
53 53 ... # make sure --config doesn't stick
54 54 ... runcommand(server, ['id'])
55 55 ...
56 56 ... # negative return code should be masked
57 57 ... runcommand(server, ['id', '-runknown'])
58 58 *** runcommand
59 59 Mercurial Distributed SCM
60 60
61 61 basic commands:
62 62
63 63 add add the specified files on the next commit
64 64 annotate show changeset information by line for each file
65 65 clone make a copy of an existing repository
66 66 commit commit the specified files or all outstanding changes
67 67 diff diff repository (or selected files)
68 68 export dump the header and diffs for one or more changesets
69 69 forget forget the specified files on the next commit
70 70 init create a new repository in the given directory
71 71 log show revision history of entire repository or files
72 72 merge merge another revision into working directory
73 73 pull pull changes from the specified source
74 74 push push changes to the specified destination
75 75 remove remove the specified files on the next commit
76 76 serve start stand-alone webserver
77 77 status show changed files in the working directory
78 78 summary summarize working directory state
79 79 update update working directory (or switch revisions)
80 80
81 81 (use 'hg help' for the full list of commands or 'hg -v' for details)
82 82 *** runcommand id --quiet
83 83 000000000000
84 84 *** runcommand id
85 85 000000000000 tip
86 86 *** runcommand id --config ui.quiet=True
87 87 000000000000
88 88 *** runcommand id
89 89 000000000000 tip
90 90 *** runcommand id -runknown
91 91 abort: unknown revision 'unknown'!
92 92 [255]
93 93
94 94 >>> from hgclient import readchannel, check
95 95 >>> @check
96 96 ... def inputeof(server):
97 97 ... readchannel(server)
98 98 ... server.stdin.write('runcommand\n')
99 99 ... # close stdin while server is waiting for input
100 100 ... server.stdin.close()
101 101 ...
102 102 ... # server exits with 1 if the pipe closed while reading the command
103 103 ... print('server exit code =', server.wait())
104 104 server exit code = 1
105 105
106 106 >>> from hgclient import readchannel, runcommand, check, stringio
107 107 >>> @check
108 108 ... def serverinput(server):
109 109 ... readchannel(server)
110 110 ...
111 111 ... patch = """
112 112 ... # HG changeset patch
113 113 ... # User test
114 114 ... # Date 0 0
115 115 ... # Node ID c103a3dec114d882c98382d684d8af798d09d857
116 116 ... # Parent 0000000000000000000000000000000000000000
117 117 ... 1
118 118 ...
119 119 ... diff -r 000000000000 -r c103a3dec114 a
120 120 ... --- /dev/null Thu Jan 01 00:00:00 1970 +0000
121 121 ... +++ b/a Thu Jan 01 00:00:00 1970 +0000
122 122 ... @@ -0,0 +1,1 @@
123 123 ... +1
124 124 ... """
125 125 ...
126 126 ... runcommand(server, ['import', '-'], input=stringio(patch))
127 127 ... runcommand(server, ['log'])
128 128 *** runcommand import -
129 129 applying patch from stdin
130 130 *** runcommand log
131 131 changeset: 0:eff892de26ec
132 132 tag: tip
133 133 user: test
134 134 date: Thu Jan 01 00:00:00 1970 +0000
135 135 summary: 1
136 136
137 137
138 138 check that "histedit --commands=-" can read rules from the input channel:
139 139
140 140 >>> import cStringIO
141 141 >>> from hgclient import readchannel, runcommand, check
142 142 >>> @check
143 143 ... def serverinput(server):
144 144 ... readchannel(server)
145 145 ... rules = 'pick eff892de26ec\n'
146 146 ... runcommand(server, ['histedit', '0', '--commands=-',
147 147 ... '--config', 'extensions.histedit='],
148 148 ... input=cStringIO.StringIO(rules))
149 149 *** runcommand histedit 0 --commands=- --config extensions.histedit=
150 150
151 151 check that --cwd doesn't persist between requests:
152 152
153 153 $ mkdir foo
154 154 $ touch foo/bar
155 155 >>> from hgclient import readchannel, runcommand, check
156 156 >>> @check
157 157 ... def cwd(server):
158 158 ... readchannel(server)
159 159 ... runcommand(server, ['--cwd', 'foo', 'st', 'bar'])
160 160 ... runcommand(server, ['st', 'foo/bar'])
161 161 *** runcommand --cwd foo st bar
162 162 ? bar
163 163 *** runcommand st foo/bar
164 164 ? foo/bar
165 165
166 166 $ rm foo/bar
167 167
168 168
169 169 check that local configs for the cached repo aren't inherited when -R is used:
170 170
171 171 $ cat <<EOF >> .hg/hgrc
172 172 > [ui]
173 173 > foo = bar
174 174 > EOF
175 175
176 176 >>> from hgclient import readchannel, sep, runcommand, check
177 177 >>> @check
178 178 ... def localhgrc(server):
179 179 ... readchannel(server)
180 180 ...
181 181 ... # the cached repo local hgrc contains ui.foo=bar, so showconfig should
182 182 ... # show it
183 183 ... runcommand(server, ['showconfig'], outfilter=sep)
184 184 ...
185 185 ... # but not for this repo
186 186 ... runcommand(server, ['init', 'foo'])
187 187 ... runcommand(server, ['-R', 'foo', 'showconfig', 'ui', 'defaults'])
188 188 *** runcommand showconfig
189 189 bundle.mainreporoot=$TESTTMP/repo
190 190 devel.all-warnings=true
191 191 devel.default-date=0 0
192 192 extensions.fsmonitor= (fsmonitor !)
193 193 largefiles.usercache=$TESTTMP/.cache/largefiles
194 194 ui.slash=True
195 195 ui.interactive=False
196 196 ui.mergemarkers=detailed
197 197 ui.usehttp2=true (?)
198 198 ui.foo=bar
199 199 ui.nontty=true
200 200 web.address=localhost
201 201 web\.ipv6=(?:True|False) (re)
202 202 *** runcommand init foo
203 203 *** runcommand -R foo showconfig ui defaults
204 204 ui.slash=True
205 205 ui.interactive=False
206 206 ui.mergemarkers=detailed
207 207 ui.usehttp2=true (?)
208 208 ui.nontty=true
209 209
210 210 $ rm -R foo
211 211
212 212 #if windows
213 213 $ PYTHONPATH="$TESTTMP/repo;$PYTHONPATH"
214 214 #else
215 215 $ PYTHONPATH="$TESTTMP/repo:$PYTHONPATH"
216 216 #endif
217 217
218 218 $ cat <<EOF > hook.py
219 219 > from __future__ import print_function
220 220 > import sys
221 221 > def hook(**args):
222 222 > print('hook talking')
223 223 > print('now try to read something: %r' % sys.stdin.read())
224 224 > EOF
225 225
226 226 >>> from hgclient import readchannel, runcommand, check, stringio
227 227 >>> @check
228 228 ... def hookoutput(server):
229 229 ... readchannel(server)
230 230 ... runcommand(server, ['--config',
231 231 ... 'hooks.pre-identify=python:hook.hook',
232 232 ... 'id'],
233 233 ... input=stringio('some input'))
234 234 *** runcommand --config hooks.pre-identify=python:hook.hook id
235 235 eff892de26ec tip
236 236
237 237 Clean hook cached version
238 238 $ rm hook.py*
239 239 $ rm -Rf __pycache__
240 240
241 241 $ echo a >> a
242 242 >>> import os
243 243 >>> from hgclient import readchannel, runcommand, check
244 244 >>> @check
245 245 ... def outsidechanges(server):
246 246 ... readchannel(server)
247 247 ... runcommand(server, ['status'])
248 248 ... os.system('hg ci -Am2')
249 249 ... runcommand(server, ['tip'])
250 250 ... runcommand(server, ['status'])
251 251 *** runcommand status
252 252 M a
253 253 *** runcommand tip
254 254 changeset: 1:d3a0a68be6de
255 255 tag: tip
256 256 user: test
257 257 date: Thu Jan 01 00:00:00 1970 +0000
258 258 summary: 2
259 259
260 260 *** runcommand status
261 261
262 262 >>> import os
263 263 >>> from hgclient import readchannel, runcommand, check
264 264 >>> @check
265 265 ... def bookmarks(server):
266 266 ... readchannel(server)
267 267 ... runcommand(server, ['bookmarks'])
268 268 ...
269 269 ... # changes .hg/bookmarks
270 270 ... os.system('hg bookmark -i bm1')
271 271 ... os.system('hg bookmark -i bm2')
272 272 ... runcommand(server, ['bookmarks'])
273 273 ...
274 274 ... # changes .hg/bookmarks.current
275 275 ... os.system('hg upd bm1 -q')
276 276 ... runcommand(server, ['bookmarks'])
277 277 ...
278 278 ... runcommand(server, ['bookmarks', 'bm3'])
279 279 ... f = open('a', 'ab')
280 280 ... f.write('a\n')
281 281 ... f.close()
282 282 ... runcommand(server, ['commit', '-Amm'])
283 283 ... runcommand(server, ['bookmarks'])
284 284 *** runcommand bookmarks
285 285 no bookmarks set
286 286 *** runcommand bookmarks
287 287 bm1 1:d3a0a68be6de
288 288 bm2 1:d3a0a68be6de
289 289 *** runcommand bookmarks
290 290 * bm1 1:d3a0a68be6de
291 291 bm2 1:d3a0a68be6de
292 292 *** runcommand bookmarks bm3
293 293 *** runcommand commit -Amm
294 294 *** runcommand bookmarks
295 295 bm1 1:d3a0a68be6de
296 296 bm2 1:d3a0a68be6de
297 297 * bm3 2:aef17e88f5f0
298 298
299 299 >>> import os
300 300 >>> from hgclient import readchannel, runcommand, check
301 301 >>> @check
302 302 ... def tagscache(server):
303 303 ... readchannel(server)
304 304 ... runcommand(server, ['id', '-t', '-r', '0'])
305 305 ... os.system('hg tag -r 0 foo')
306 306 ... runcommand(server, ['id', '-t', '-r', '0'])
307 307 *** runcommand id -t -r 0
308 308
309 309 *** runcommand id -t -r 0
310 310 foo
311 311
312 312 >>> import os
313 313 >>> from hgclient import readchannel, runcommand, check
314 314 >>> @check
315 315 ... def setphase(server):
316 316 ... readchannel(server)
317 317 ... runcommand(server, ['phase', '-r', '.'])
318 318 ... os.system('hg phase -r . -p')
319 319 ... runcommand(server, ['phase', '-r', '.'])
320 320 *** runcommand phase -r .
321 321 3: draft
322 322 *** runcommand phase -r .
323 323 3: public
324 324
325 325 $ echo a >> a
326 326 >>> from hgclient import readchannel, runcommand, check
327 327 >>> @check
328 328 ... def rollback(server):
329 329 ... readchannel(server)
330 330 ... runcommand(server, ['phase', '-r', '.', '-p'])
331 331 ... runcommand(server, ['commit', '-Am.'])
332 332 ... runcommand(server, ['rollback'])
333 333 ... runcommand(server, ['phase', '-r', '.'])
334 334 *** runcommand phase -r . -p
335 335 no phases changed
336 336 *** runcommand commit -Am.
337 337 *** runcommand rollback
338 338 repository tip rolled back to revision 3 (undo commit)
339 339 working directory now based on revision 3
340 340 *** runcommand phase -r .
341 341 3: public
342 342
343 343 >>> import os
344 344 >>> from hgclient import readchannel, runcommand, check
345 345 >>> @check
346 346 ... def branch(server):
347 347 ... readchannel(server)
348 348 ... runcommand(server, ['branch'])
349 349 ... os.system('hg branch foo')
350 350 ... runcommand(server, ['branch'])
351 351 ... os.system('hg branch default')
352 352 *** runcommand branch
353 353 default
354 354 marked working directory as branch foo
355 355 (branches are permanent and global, did you want a bookmark?)
356 356 *** runcommand branch
357 357 foo
358 358 marked working directory as branch default
359 359 (branches are permanent and global, did you want a bookmark?)
360 360
361 361 $ touch .hgignore
362 362 >>> import os
363 363 >>> from hgclient import readchannel, runcommand, check
364 364 >>> @check
365 365 ... def hgignore(server):
366 366 ... readchannel(server)
367 367 ... runcommand(server, ['commit', '-Am.'])
368 368 ... f = open('ignored-file', 'ab')
369 369 ... f.write('')
370 370 ... f.close()
371 371 ... f = open('.hgignore', 'ab')
372 372 ... f.write('ignored-file')
373 373 ... f.close()
374 374 ... runcommand(server, ['status', '-i', '-u'])
375 375 *** runcommand commit -Am.
376 376 adding .hgignore
377 377 *** runcommand status -i -u
378 378 I ignored-file
379 379
380 380 cache of non-public revisions should be invalidated on repository change
381 381 (issue4855):
382 382
383 383 >>> import os
384 384 >>> from hgclient import readchannel, runcommand, check
385 385 >>> @check
386 386 ... def phasesetscacheaftercommit(server):
387 387 ... readchannel(server)
388 388 ... # load _phasecache._phaserevs and _phasesets
389 389 ... runcommand(server, ['log', '-qr', 'draft()'])
390 390 ... # create draft commits by another process
391 391 ... for i in xrange(5, 7):
392 392 ... f = open('a', 'ab')
393 393 ... f.seek(0, os.SEEK_END)
394 394 ... f.write('a\n')
395 395 ... f.close()
396 396 ... os.system('hg commit -Aqm%d' % i)
397 397 ... # new commits should be listed as draft revisions
398 398 ... runcommand(server, ['log', '-qr', 'draft()'])
399 399 *** runcommand log -qr draft()
400 400 4:7966c8e3734d
401 401 *** runcommand log -qr draft()
402 402 4:7966c8e3734d
403 403 5:41f6602d1c4f
404 404 6:10501e202c35
405 405
406 406 >>> import os
407 407 >>> from hgclient import readchannel, runcommand, check
408 408 >>> @check
409 409 ... def phasesetscacheafterstrip(server):
410 410 ... readchannel(server)
411 411 ... # load _phasecache._phaserevs and _phasesets
412 412 ... runcommand(server, ['log', '-qr', 'draft()'])
413 413 ... # strip cached revisions by another process
414 414 ... os.system('hg --config extensions.strip= strip -q 5')
415 415 ... # shouldn't abort by "unknown revision '6'"
416 416 ... runcommand(server, ['log', '-qr', 'draft()'])
417 417 *** runcommand log -qr draft()
418 418 4:7966c8e3734d
419 419 5:41f6602d1c4f
420 420 6:10501e202c35
421 421 *** runcommand log -qr draft()
422 422 4:7966c8e3734d
423 423
424 424 cache of phase roots should be invalidated on strip (issue3827):
425 425
426 426 >>> import os
427 427 >>> from hgclient import readchannel, sep, runcommand, check
428 428 >>> @check
429 429 ... def phasecacheafterstrip(server):
430 430 ... readchannel(server)
431 431 ...
432 432 ... # create new head, 5:731265503d86
433 433 ... runcommand(server, ['update', '-C', '0'])
434 434 ... f = open('a', 'ab')
435 435 ... f.write('a\n')
436 436 ... f.close()
437 437 ... runcommand(server, ['commit', '-Am.', 'a'])
438 438 ... runcommand(server, ['log', '-Gq'])
439 439 ...
440 440 ... # make it public; draft marker moves to 4:7966c8e3734d
441 441 ... runcommand(server, ['phase', '-p', '.'])
442 442 ... # load _phasecache.phaseroots
443 443 ... runcommand(server, ['phase', '.'], outfilter=sep)
444 444 ...
445 445 ... # strip 1::4 outside server
446 446 ... os.system('hg -q --config extensions.mq= strip 1')
447 447 ...
448 448 ... # shouldn't raise "7966c8e3734d: no node!"
449 449 ... runcommand(server, ['branches'])
450 450 *** runcommand update -C 0
451 451 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
452 452 (leaving bookmark bm3)
453 453 *** runcommand commit -Am. a
454 454 created new head
455 455 *** runcommand log -Gq
456 456 @ 5:731265503d86
457 457 |
458 458 | o 4:7966c8e3734d
459 459 | |
460 460 | o 3:b9b85890c400
461 461 | |
462 462 | o 2:aef17e88f5f0
463 463 | |
464 464 | o 1:d3a0a68be6de
465 465 |/
466 466 o 0:eff892de26ec
467 467
468 468 *** runcommand phase -p .
469 469 *** runcommand phase .
470 470 5: public
471 471 *** runcommand branches
472 472 default 1:731265503d86
473 473
474 474 in-memory cache must be reloaded if transaction is aborted. otherwise
475 475 changelog and manifest would have invalid node:
476 476
477 477 $ echo a >> a
478 478 >>> from hgclient import readchannel, runcommand, check
479 479 >>> @check
480 480 ... def txabort(server):
481 481 ... readchannel(server)
482 482 ... runcommand(server, ['commit', '--config', 'hooks.pretxncommit=false',
483 483 ... '-mfoo'])
484 484 ... runcommand(server, ['verify'])
485 485 *** runcommand commit --config hooks.pretxncommit=false -mfoo
486 486 transaction abort!
487 487 rollback completed
488 488 abort: pretxncommit hook exited with status 1
489 489 [255]
490 490 *** runcommand verify
491 491 checking changesets
492 492 checking manifests
493 493 crosschecking files in changesets and manifests
494 494 checking files
495 495 1 files, 2 changesets, 2 total revisions
496 496 $ hg revert --no-backup -aq
497 497
498 498 $ cat >> .hg/hgrc << EOF
499 499 > [experimental]
500 500 > evolution=createmarkers
501 501 > EOF
502 502
503 503 >>> import os
504 504 >>> from hgclient import readchannel, runcommand, check
505 505 >>> @check
506 506 ... def obsolete(server):
507 507 ... readchannel(server)
508 508 ...
509 509 ... runcommand(server, ['up', 'null'])
510 510 ... runcommand(server, ['phase', '-df', 'tip'])
511 511 ... cmd = 'hg debugobsolete `hg log -r tip --template {node}`'
512 512 ... if os.name == 'nt':
513 513 ... cmd = 'sh -c "%s"' % cmd # run in sh, not cmd.exe
514 514 ... os.system(cmd)
515 515 ... runcommand(server, ['log', '--hidden'])
516 516 ... runcommand(server, ['log'])
517 517 *** runcommand up null
518 518 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
519 519 *** runcommand phase -df tip
520 520 obsoleted 1 changesets
521 521 *** runcommand log --hidden
522 522 changeset: 1:731265503d86
523 523 tag: tip
524 524 user: test
525 525 date: Thu Jan 01 00:00:00 1970 +0000
526 526 summary: .
527 527
528 528 changeset: 0:eff892de26ec
529 529 bookmark: bm1
530 530 bookmark: bm2
531 531 bookmark: bm3
532 532 user: test
533 533 date: Thu Jan 01 00:00:00 1970 +0000
534 534 summary: 1
535 535
536 536 *** runcommand log
537 537 changeset: 0:eff892de26ec
538 538 bookmark: bm1
539 539 bookmark: bm2
540 540 bookmark: bm3
541 541 tag: tip
542 542 user: test
543 543 date: Thu Jan 01 00:00:00 1970 +0000
544 544 summary: 1
545 545
546 546
547 547 $ cat <<EOF >> .hg/hgrc
548 548 > [extensions]
549 549 > mq =
550 550 > EOF
551 551
552 552 >>> import os
553 553 >>> from hgclient import readchannel, runcommand, check
554 554 >>> @check
555 555 ... def mqoutsidechanges(server):
556 556 ... readchannel(server)
557 557 ...
558 558 ... # load repo.mq
559 559 ... runcommand(server, ['qapplied'])
560 560 ... os.system('hg qnew 0.diff')
561 561 ... # repo.mq should be invalidated
562 562 ... runcommand(server, ['qapplied'])
563 563 ...
564 564 ... runcommand(server, ['qpop', '--all'])
565 565 ... os.system('hg qqueue --create foo')
566 566 ... # repo.mq should be recreated to point to new queue
567 567 ... runcommand(server, ['qqueue', '--active'])
568 568 *** runcommand qapplied
569 569 *** runcommand qapplied
570 570 0.diff
571 571 *** runcommand qpop --all
572 572 popping 0.diff
573 573 patch queue now empty
574 574 *** runcommand qqueue --active
575 575 foo
576 576
577 577 $ cat <<EOF > dbgui.py
578 578 > import os, sys
579 579 > from mercurial import commands, registrar
580 580 > cmdtable = {}
581 581 > command = registrar.command(cmdtable)
582 582 > @command(b"debuggetpass", norepo=True)
583 583 > def debuggetpass(ui):
584 584 > ui.write("%s\\n" % ui.getpass())
585 585 > @command(b"debugprompt", norepo=True)
586 586 > def debugprompt(ui):
587 587 > ui.write("%s\\n" % ui.prompt("prompt:"))
588 588 > @command(b"debugreadstdin", norepo=True)
589 589 > def debugreadstdin(ui):
590 590 > ui.write("read: %r\n" % sys.stdin.read(1))
591 591 > @command(b"debugwritestdout", norepo=True)
592 592 > def debugwritestdout(ui):
593 593 > os.write(1, "low-level stdout fd and\n")
594 594 > sys.stdout.write("stdout should be redirected to /dev/null\n")
595 595 > sys.stdout.flush()
596 596 > EOF
597 597 $ cat <<EOF >> .hg/hgrc
598 598 > [extensions]
599 599 > dbgui = dbgui.py
600 600 > EOF
601 601
602 602 >>> from hgclient import readchannel, runcommand, check, stringio
603 603 >>> @check
604 604 ... def getpass(server):
605 605 ... readchannel(server)
606 606 ... runcommand(server, ['debuggetpass', '--config',
607 607 ... 'ui.interactive=True'],
608 608 ... input=stringio('1234\n'))
609 609 ... runcommand(server, ['debuggetpass', '--config',
610 610 ... 'ui.interactive=True'],
611 611 ... input=stringio('\n'))
612 612 ... runcommand(server, ['debuggetpass', '--config',
613 613 ... 'ui.interactive=True'],
614 614 ... input=stringio(''))
615 615 ... runcommand(server, ['debugprompt', '--config',
616 616 ... 'ui.interactive=True'],
617 617 ... input=stringio('5678\n'))
618 618 ... runcommand(server, ['debugreadstdin'])
619 619 ... runcommand(server, ['debugwritestdout'])
620 620 *** runcommand debuggetpass --config ui.interactive=True
621 621 password: 1234
622 622 *** runcommand debuggetpass --config ui.interactive=True
623 623 password:
624 624 *** runcommand debuggetpass --config ui.interactive=True
625 625 password: abort: response expected
626 626 [255]
627 627 *** runcommand debugprompt --config ui.interactive=True
628 628 prompt: 5678
629 629 *** runcommand debugreadstdin
630 630 read: ''
631 631 *** runcommand debugwritestdout
632 632
633 633
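ui.getpass() and ui.prompt() in the dbgui commands above surface as
requests on the input channels; a client servicing one such request
presumably replies with a length-prefixed block drawn from its own input,
an empty block meaning EOF (sketch; serveinput is an illustrative name):

    import struct

    def serveinput(server, fin, channel, maxlen):
        # answer one 'L' (line-based) or 'I' (raw) input request
        if channel == 'L':
            data = fin.readline(maxlen)
        else:
            data = fin.read(maxlen)
        server.stdin.write(struct.pack('>I', len(data)) + data)
        server.stdin.flush()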
634 634 run a command server inside the command server, which is silly but should work:
635 635
636 636 >>> from __future__ import print_function
637 637 >>> from hgclient import readchannel, runcommand, check, stringio
638 638 >>> @check
639 639 ... def nested(server):
640 640 ... print('%c, %r' % readchannel(server))
641 641 ... class nestedserver(object):
642 642 ... stdin = stringio('getencoding\n')
643 643 ... stdout = stringio()
644 644 ... runcommand(server, ['serve', '--cmdserver', 'pipe'],
645 645 ... output=nestedserver.stdout, input=nestedserver.stdin)
646 646 ... nestedserver.stdout.seek(0)
647 647 ... print('%c, %r' % readchannel(nestedserver)) # hello
648 648 ... print('%c, %r' % readchannel(nestedserver)) # getencoding
649 649 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
650 650 *** runcommand serve --cmdserver pipe
651 651 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
652 652 r, '*' (glob)
653 653
654 654
655 655 start without a repository:
656 656
657 657 $ cd ..
658 658
659 659 >>> from __future__ import print_function
660 660 >>> from hgclient import readchannel, runcommand, check
661 661 >>> @check
662 662 ... def hellomessage(server):
663 663 ... ch, data = readchannel(server)
664 664 ... print('%c, %r' % (ch, data))
665 665 ... # run an arbitrary command to make sure the next thing the server
666 666 ... # sends isn't part of the hello message
667 667 ... runcommand(server, ['id'])
668 668 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
669 669 *** runcommand id
670 670 abort: there is no Mercurial repository here (.hg not found)
671 671 [255]
672 672
673 673 >>> from hgclient import readchannel, runcommand, check
674 674 >>> @check
675 675 ... def startwithoutrepo(server):
676 676 ... readchannel(server)
677 677 ... runcommand(server, ['init', 'repo2'])
678 678 ... runcommand(server, ['id', '-R', 'repo2'])
679 679 *** runcommand init repo2
680 680 *** runcommand id -R repo2
681 681 000000000000 tip
682 682
683 683
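The pipe-mode servers handed to these checks are plain subprocesses
speaking the protocol over stdio; spawning one is roughly (sketch;
connectpipe is an illustrative name, though hgclient must do something
equivalent):

    import subprocess

    def connectpipe(path=None):
        cmd = ['hg', 'serve', '--cmdserver', 'pipe']
        if path:
            cmd += ['-R', path]
        # the child's stdio becomes the protocol transport
        return subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)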
684 684 don't fall back to cwd if invalid -R path is specified (issue4805):
685 685
686 686 $ cd repo
687 687 $ hg serve --cmdserver pipe -R ../nonexistent
688 688 abort: repository ../nonexistent not found!
689 689 [255]
690 690 $ cd ..
691 691
692 692
693 693 unix domain socket:
694 694
695 695 $ cd repo
696 696 $ hg update -q
697 697
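In unix-socket mode the transport is a socket rather than stdio;
connecting a raw client is roughly (sketch; hgclient's unixserver
presumably wraps this plus the `hg serve --cmdserver unix` invocation):

    import socket

    def connectunix(sockpath):
        # POSIX-only, matching the #if guard below
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(sockpath)
        # file objects for the read and write directions
        return sock.makefile('rb'), sock.makefile('wb')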
698 698 #if unix-socket unix-permissions
699 699
700 700 >>> from __future__ import print_function
701 701 >>> from hgclient import unixserver, readchannel, runcommand, check, stringio
702 702 >>> server = unixserver('.hg/server.sock', '.hg/server.log')
703 703 >>> def hellomessage(conn):
704 704 ... ch, data = readchannel(conn)
705 705 ... print('%c, %r' % (ch, data))
706 706 ... runcommand(conn, ['id'])
707 707 >>> check(hellomessage, server.connect)
708 708 o, 'capabilities: getencoding runcommand\nencoding: *\npid: *' (glob)
709 709 *** runcommand id
710 710 eff892de26ec tip bm1/bm2/bm3
711 711 >>> def unknowncommand(conn):
712 712 ... readchannel(conn)
713 713 ... conn.stdin.write('unknowncommand\n')
714 714 >>> check(unknowncommand, server.connect) # error sent to server.log
715 715 >>> def serverinput(conn):
716 716 ... readchannel(conn)
717 717 ... patch = """
718 718 ... # HG changeset patch
719 719 ... # User test
720 720 ... # Date 0 0
721 721 ... 2
722 722 ...
723 723 ... diff -r eff892de26ec -r 1ed24be7e7a0 a
724 724 ... --- a/a
725 725 ... +++ b/a
726 726 ... @@ -1,1 +1,2 @@
727 727 ... 1
728 728 ... +2
729 729 ... """
730 730 ... runcommand(conn, ['import', '-'], input=stringio(patch))
731 731 ... runcommand(conn, ['log', '-rtip', '-q'])
732 732 >>> check(serverinput, server.connect)
733 733 *** runcommand import -
734 734 applying patch from stdin
735 735 *** runcommand log -rtip -q
736 736 2:1ed24be7e7a0
737 737 >>> server.shutdown()
738 738
739 739 $ cat .hg/server.log
740 740 listening at .hg/server.sock
741 741 abort: unknown command unknowncommand
742 742 killed!
743 743 $ rm .hg/server.log
744 744
745 745 if the server crashes before the hello message, the traceback is sent to
746 746 the 'e' channel as a last-ditch measure:
747 747
748 748 $ cat <<EOF >> .hg/hgrc
749 749 > [cmdserver]
750 750 > log = inexistent/path.log
751 751 > EOF
752 752 >>> from __future__ import print_function
753 753 >>> from hgclient import unixserver, readchannel, check
754 754 >>> server = unixserver('.hg/server.sock', '.hg/server.log')
755 755 >>> def earlycrash(conn):
756 756 ... while True:
757 757 ... try:
758 758 ... ch, data = readchannel(conn)
759 759 ... if not data.startswith(' '):
760 760 ... print('%c, %r' % (ch, data))
761 761 ... except EOFError:
762 762 ... break
763 763 >>> check(earlycrash, server.connect)
764 764 e, 'Traceback (most recent call last):\n'
765 765 e, "IOError: *" (glob)
766 766 >>> server.shutdown()
767 767
768 768 $ cat .hg/server.log | grep -v '^ '
769 769 listening at .hg/server.sock
770 770 Traceback (most recent call last):
771 771 IOError: * (glob)
772 772 killed!
773 773 #endif
774 774 #if no-unix-socket
775 775
776 776 $ hg serve --cmdserver unix -a .hg/server.sock
777 777 abort: unsupported platform
778 778 [255]
779 779
780 780 #endif
781 781
782 782 $ cd ..
783 783
784 784 Test that access to an invalid changelog cache is avoided in
785 785 subsequent operations even if the repo object is reused after a
786 786 failed transaction (see 0a7610758c42 also)
787 787
788 788 "hg log" after failure of transaction is needed to detect invalid
789 789 cache in repoview: this can't detect by "hg verify" only.
790 790
791 791 All combinations of "finalization" and "emptiness of changelog" (2 x 2
792 792 = 4 cases) are tested, because '00changelog.i' is changed differently
793 793 in each case.
794 794
795 795 $ cat > $TESTTMP/failafterfinalize.py <<EOF
796 796 > # extension to forcibly abort a transaction after finalization
797 797 > from mercurial import commands, error, extensions, lock as lockmod
798 798 > def fail(tr):
799 799 > raise error.Abort('fail after finalization')
800 800 > def reposetup(ui, repo):
801 801 > class failrepo(repo.__class__):
802 802 > def commitctx(self, ctx, error=False):
803 803 > if self.ui.configbool('failafterfinalize', 'fail'):
804 804 > # finalizers run sorted by category name (ASCII); the
805 805 > # changelog registers under 'cl-%i' % id(self), so the
806 806 > # 'zzzzzzzz' category invokes 'fail' after changelog finalization
807 807 > self.currenttransaction().addfinalize('zzzzzzzz', fail)
808 808 > return super(failrepo, self).commitctx(ctx, error)
809 809 > repo.__class__ = failrepo
810 810 > EOF
811 811
812 812 $ hg init repo3
813 813 $ cd repo3
814 814
815 815 $ cat <<EOF >> $HGRCPATH
816 816 > [ui]
817 817 > logtemplate = {rev} {desc|firstline} ({files})\n
818 818 >
819 819 > [extensions]
820 820 > failafterfinalize = $TESTTMP/failafterfinalize.py
821 821 > EOF
822 822
823 823 - test failure with "empty changelog"
824 824
825 825 $ echo foo > foo
826 826 $ hg add foo
827 827
828 828 (failure before finalization)
829 829
830 830 >>> from hgclient import readchannel, runcommand, check
831 831 >>> @check
832 832 ... def abort(server):
833 833 ... readchannel(server)
834 834 ... runcommand(server, ['commit',
835 835 ... '--config', 'hooks.pretxncommit=false',
836 836 ... '-mfoo'])
837 837 ... runcommand(server, ['log'])
838 838 ... runcommand(server, ['verify', '-q'])
839 839 *** runcommand commit --config hooks.pretxncommit=false -mfoo
840 840 transaction abort!
841 841 rollback completed
842 842 abort: pretxncommit hook exited with status 1
843 843 [255]
844 844 *** runcommand log
845 845 *** runcommand verify -q
846 846
847 847 (failure after finalization)
848 848
849 849 >>> from hgclient import readchannel, runcommand, check
850 850 >>> @check
851 851 ... def abort(server):
852 852 ... readchannel(server)
853 853 ... runcommand(server, ['commit',
854 854 ... '--config', 'failafterfinalize.fail=true',
855 855 ... '-mfoo'])
856 856 ... runcommand(server, ['log'])
857 857 ... runcommand(server, ['verify', '-q'])
858 858 *** runcommand commit --config failafterfinalize.fail=true -mfoo
859 859 transaction abort!
860 860 rollback completed
861 861 abort: fail after finalization
862 862 [255]
863 863 *** runcommand log
864 864 *** runcommand verify -q
865 865
866 866 - test failure with "not-empty changelog"
867 867
868 868 $ echo bar > bar
869 869 $ hg add bar
870 870 $ hg commit -mbar bar
871 871
872 872 (failure before finalization)
873 873
874 874 >>> from hgclient import readchannel, runcommand, check
875 875 >>> @check
876 876 ... def abort(server):
877 877 ... readchannel(server)
878 878 ... runcommand(server, ['commit',
879 879 ... '--config', 'hooks.pretxncommit=false',
880 880 ... '-mfoo', 'foo'])
881 881 ... runcommand(server, ['log'])
882 882 ... runcommand(server, ['verify', '-q'])
883 883 *** runcommand commit --config hooks.pretxncommit=false -mfoo foo
884 884 transaction abort!
885 885 rollback completed
886 886 abort: pretxncommit hook exited with status 1
887 887 [255]
888 888 *** runcommand log
889 889 0 bar (bar)
890 890 *** runcommand verify -q
891 891
892 892 (failure after finalization)
893 893
894 894 >>> from hgclient import readchannel, runcommand, check
895 895 >>> @check
896 896 ... def abort(server):
897 897 ... readchannel(server)
898 898 ... runcommand(server, ['commit',
899 899 ... '--config', 'failafterfinalize.fail=true',
900 900 ... '-mfoo', 'foo'])
901 901 ... runcommand(server, ['log'])
902 902 ... runcommand(server, ['verify', '-q'])
903 903 *** runcommand commit --config failafterfinalize.fail=true -mfoo foo
904 904 transaction abort!
905 905 rollback completed
906 906 abort: fail after finalization
907 907 [255]
908 908 *** runcommand log
909 909 0 bar (bar)
910 910 *** runcommand verify -q
911 911
912 912 $ cd ..
913 913
914 914 Test symlink traversal over cached audited paths:
915 915 -------------------------------------------------
916 916
917 917 #if symlink
918 918
919 919 set up symlink hell
920 920
921 921 $ mkdir merge-symlink-out
922 922 $ hg init merge-symlink
923 923 $ cd merge-symlink
924 924 $ touch base
925 925 $ hg commit -qAm base
926 926 $ ln -s ../merge-symlink-out a
927 927 $ hg commit -qAm 'symlink a -> ../merge-symlink-out'
928 928 $ hg up -q 0
929 929 $ mkdir a
930 930 $ touch a/poisoned
931 931 $ hg commit -qAm 'file a/poisoned'
932 932 $ hg log -G -T '{rev}: {desc}\n'
933 933 @ 2: file a/poisoned
934 934 |
935 935 | o 1: symlink a -> ../merge-symlink-out
936 936 |/
937 937 o 0: base
938 938
939 939
940 940 try a trivial merge after update: the cache of audited paths should be
941 941 discarded, and the merge should fail (issue5628)
942 942
943 943 $ hg up -q null
944 944 >>> from hgclient import readchannel, runcommand, check
945 945 >>> @check
946 946 ... def merge(server):
947 947 ... readchannel(server)
948 948 ... # audit a/poisoned as a good path
949 949 ... runcommand(server, ['up', '-qC', '2'])
950 950 ... runcommand(server, ['up', '-qC', '1'])
951 951 ... # here a is a symlink, so a/poisoned is bad
952 952 ... runcommand(server, ['merge', '2'])
953 953 *** runcommand up -qC 2
954 954 *** runcommand up -qC 1
955 955 *** runcommand merge 2
956 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
957 (branch merge, don't forget to commit)
956 abort: path 'a/poisoned' traverses symbolic link 'a'
957 [255]
958 958 $ ls ../merge-symlink-out
959 poisoned
960 959
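The abort above comes from Mercurial's path auditor, which validates that
a repo-relative path does not escape the working directory through a
symlink. A hedged sketch of that API (the repo path is illustrative; the
caching behaviour named in this changeset is what issue5628 disables by
default, since a path audited while 'a' was a real directory would
otherwise stay trusted after 'a' becomes a symlink):

    from mercurial import error, pathutil

    auditor = pathutil.pathauditor('/path/to/repo')
    try:
        auditor('a/poisoned')  # repo-relative path to validate
    except error.Abort:
        pass  # "path 'a/poisoned' traverses symbolic link 'a'"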
961 960 the cache of repo.auditor should be discarded, so the matcher never
962 961 traverses symlinks:
963 962
964 963 $ hg up -qC 0
965 964 $ touch ../merge-symlink-out/poisoned
966 965 >>> from hgclient import readchannel, runcommand, check
967 966 >>> @check
968 967 ... def files(server):
969 968 ... readchannel(server)
970 969 ... runcommand(server, ['up', '-qC', '2'])
971 970 ... # audit a/poisoned as a good path
972 971 ... runcommand(server, ['files', 'a/poisoned'])
973 972 ... runcommand(server, ['up', '-qC', '0'])
974 973 ... runcommand(server, ['up', '-qC', '1'])
975 974 ... # here 'a' is a symlink, so a/poisoned should be rejected
976 975 ... runcommand(server, ['files', 'a/poisoned'])
977 976 *** runcommand up -qC 2
978 977 *** runcommand files a/poisoned
979 978 a/poisoned
980 979 *** runcommand up -qC 0
981 980 *** runcommand up -qC 1
982 981 *** runcommand files a/poisoned
983 [1]
982 abort: path 'a/poisoned' traverses symbolic link 'a'
983 [255]
984 984
985 985 $ cd ..
986 986
987 987 #endif