color: initialize color for the localrepo ui...
Pierre-Yves David
r31111:95ec3ad6 default
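The substantive change in this revision is two lines in the 2000-line context below: the color module is added to localrepo's imports, and color.setup(self.ui) is called in localrepository.__init__ after the repository's .hg/hgrc has been read into its private ui copy, so per-repository color settings take effect. Here is a minimal, hypothetical sketch of that pattern; the class names and the config key are illustrative stand-ins, not Mercurial's actual ui/localrepository API (the real logic lives in Mercurial's color module):

    # Sketch of the wiring this changeset adds, with hypothetical stand-in
    # types: a feature module exposes setup(ui), and the repository
    # constructor invokes it after per-repo configuration has been folded
    # into its ui copy.

    class fakeui(object):
        """Stand-in for mercurial.ui.ui: holds config and a color mode."""
        def __init__(self, config=None):
            self.config = dict(config or {})
            self.colormode = None

        def copy(self):
            return fakeui(self.config)

    def color_setup(ui):
        """Stand-in for color.setup(ui): resolve the effective color mode."""
        mode = ui.config.get(('ui', 'color'), 'auto')  # illustrative key
        ui.colormode = None if mode == 'off' else mode

    class fakerepo(object):
        def __init__(self, baseui, repoconfig):
            self.ui = baseui.copy()
            # localrepo reads .hg/hgrc into self.ui before this point, so
            # repository-level settings are visible when color is set up.
            self.ui.config.update(repoconfig)
            color_setup(self.ui)  # mirrors the added color.setup(self.ui)

    repo = fakerepo(fakeui(), {('ui', 'color'): 'always'})
    assert repo.ui.colormode == 'always'

The point of the ordering is simply that color initialization sees repository-level configuration, not just the global baseui settings.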
@@ -1,2047 +1,2049 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 color,
31 32 context,
32 33 dirstate,
33 34 dirstateguard,
34 35 encoding,
35 36 error,
36 37 exchange,
37 38 extensions,
38 39 filelog,
39 40 hook,
40 41 lock as lockmod,
41 42 manifest,
42 43 match as matchmod,
43 44 merge as mergemod,
44 45 mergeutil,
45 46 namespaces,
46 47 obsolete,
47 48 pathutil,
48 49 peer,
49 50 phases,
50 51 pushkey,
51 52 repoview,
52 53 revset,
53 54 revsetlang,
54 55 scmutil,
55 56 store,
56 57 subrepo,
57 58 tags as tagsmod,
58 59 transaction,
59 60 txnutil,
60 61 util,
61 62 )
62 63
63 64 release = lockmod.release
64 65 urlerr = util.urlerr
65 66 urlreq = util.urlreq
66 67
67 68 class repofilecache(scmutil.filecache):
68 69 """All filecache usage on repo are done for logic that should be unfiltered
69 70 """
70 71
71 72 def __get__(self, repo, type=None):
72 73 if repo is None:
73 74 return self
74 75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
75 76 def __set__(self, repo, value):
76 77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
77 78 def __delete__(self, repo):
78 79 return super(repofilecache, self).__delete__(repo.unfiltered())
79 80
80 81 class storecache(repofilecache):
81 82 """filecache for files in the store"""
82 83 def join(self, obj, fname):
83 84 return obj.sjoin(fname)
84 85
85 86 class unfilteredpropertycache(util.propertycache):
86 87 """propertycache that apply to unfiltered repo only"""
87 88
88 89 def __get__(self, repo, type=None):
89 90 unfi = repo.unfiltered()
90 91 if unfi is repo:
91 92 return super(unfilteredpropertycache, self).__get__(unfi)
92 93 return getattr(unfi, self.name)
93 94
94 95 class filteredpropertycache(util.propertycache):
95 96 """propertycache that must take filtering in account"""
96 97
97 98 def cachevalue(self, obj, value):
98 99 object.__setattr__(obj, self.name, value)
99 100
100 101
101 102 def hasunfilteredcache(repo, name):
102 103 """check if a repo has an unfilteredpropertycache value for <name>"""
103 104 return name in vars(repo.unfiltered())
104 105
105 106 def unfilteredmethod(orig):
106 107 """decorate method that always need to be run on unfiltered version"""
107 108 def wrapper(repo, *args, **kwargs):
108 109 return orig(repo.unfiltered(), *args, **kwargs)
109 110 return wrapper
110 111
111 112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
112 113 'unbundle'))
113 114 legacycaps = moderncaps.union(set(['changegroupsubset']))
114 115
115 116 class localpeer(peer.peerrepository):
116 117 '''peer for a local repo; reflects only the most recent API'''
117 118
118 119 def __init__(self, repo, caps=moderncaps):
119 120 peer.peerrepository.__init__(self)
120 121 self._repo = repo.filtered('served')
121 122 self.ui = repo.ui
122 123 self._caps = repo._restrictcapabilities(caps)
123 124 self.requirements = repo.requirements
124 125 self.supportedformats = repo.supportedformats
125 126
126 127 def close(self):
127 128 self._repo.close()
128 129
129 130 def _capabilities(self):
130 131 return self._caps
131 132
132 133 def local(self):
133 134 return self._repo
134 135
135 136 def canpush(self):
136 137 return True
137 138
138 139 def url(self):
139 140 return self._repo.url()
140 141
141 142 def lookup(self, key):
142 143 return self._repo.lookup(key)
143 144
144 145 def branchmap(self):
145 146 return self._repo.branchmap()
146 147
147 148 def heads(self):
148 149 return self._repo.heads()
149 150
150 151 def known(self, nodes):
151 152 return self._repo.known(nodes)
152 153
153 154 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
154 155 **kwargs):
155 156 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
156 157 common=common, bundlecaps=bundlecaps,
157 158 **kwargs)
158 159 cb = util.chunkbuffer(chunks)
159 160
160 161 if bundlecaps is not None and 'HG20' in bundlecaps:
161 162 # When requesting a bundle2, getbundle returns a stream to make the
162 163 # wire level function happier. We need to build a proper object
163 164 # from it in local peer.
164 165 return bundle2.getunbundler(self.ui, cb)
165 166 else:
166 167 return changegroup.getunbundler('01', cb, None)
167 168
168 169 # TODO We might want to move the next two calls into legacypeer and add
169 170 # unbundle instead.
170 171
171 172 def unbundle(self, cg, heads, url):
172 173 """apply a bundle on a repo
173 174
174 175 This function handles the repo locking itself."""
175 176 try:
176 177 try:
177 178 cg = exchange.readbundle(self.ui, cg, None)
178 179 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
179 180 if util.safehasattr(ret, 'getchunks'):
180 181 # This is a bundle20 object, turn it into an unbundler.
181 182 # This little dance should be dropped eventually when the
182 183 # API is finally improved.
183 184 stream = util.chunkbuffer(ret.getchunks())
184 185 ret = bundle2.getunbundler(self.ui, stream)
185 186 return ret
186 187 except Exception as exc:
187 188 # If the exception contains output salvaged from a bundle2
188 189 # reply, we need to make sure it is printed before continuing
189 190 # to fail. So we build a bundle2 with such output and consume
190 191 # it directly.
191 192 #
192 193 # This is not very elegant but allows a "simple" solution for
193 194 # issue4594
194 195 output = getattr(exc, '_bundle2salvagedoutput', ())
195 196 if output:
196 197 bundler = bundle2.bundle20(self._repo.ui)
197 198 for out in output:
198 199 bundler.addpart(out)
199 200 stream = util.chunkbuffer(bundler.getchunks())
200 201 b = bundle2.getunbundler(self.ui, stream)
201 202 bundle2.processbundle(self._repo, b)
202 203 raise
203 204 except error.PushRaced as exc:
204 205 raise error.ResponseError(_('push failed:'), str(exc))
205 206
206 207 def lock(self):
207 208 return self._repo.lock()
208 209
209 210 def addchangegroup(self, cg, source, url):
210 211 return cg.apply(self._repo, source, url)
211 212
212 213 def pushkey(self, namespace, key, old, new):
213 214 return self._repo.pushkey(namespace, key, old, new)
214 215
215 216 def listkeys(self, namespace):
216 217 return self._repo.listkeys(namespace)
217 218
218 219 def debugwireargs(self, one, two, three=None, four=None, five=None):
219 220 '''used to test argument passing over the wire'''
220 221 return "%s %s %s %s %s" % (one, two, three, four, five)
221 222
222 223 class locallegacypeer(localpeer):
223 224 '''peer extension which implements legacy methods too; used for tests with
224 225 restricted capabilities'''
225 226
226 227 def __init__(self, repo):
227 228 localpeer.__init__(self, repo, caps=legacycaps)
228 229
229 230 def branches(self, nodes):
230 231 return self._repo.branches(nodes)
231 232
232 233 def between(self, pairs):
233 234 return self._repo.between(pairs)
234 235
235 236 def changegroup(self, basenodes, source):
236 237 return changegroup.changegroup(self._repo, basenodes, source)
237 238
238 239 def changegroupsubset(self, bases, heads, source):
239 240 return changegroup.changegroupsubset(self._repo, bases, heads, source)
240 241
241 242 class localrepository(object):
242 243
243 244 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
244 245 'manifestv2'))
245 246 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
246 247 'dotencode'))
247 248 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
248 249 filtername = None
249 250
250 251 # a list of (ui, featureset) functions.
251 252 # only functions defined in modules of enabled extensions are invoked
252 253 featuresetupfuncs = set()
253 254
254 255 def __init__(self, baseui, path, create=False):
255 256 self.requirements = set()
256 257 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
257 258 self.wopener = self.wvfs
258 259 self.root = self.wvfs.base
259 260 self.path = self.wvfs.join(".hg")
260 261 self.origroot = path
261 262 self.auditor = pathutil.pathauditor(self.root, self._checknested)
262 263 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
263 264 realfs=False)
264 265 self.vfs = scmutil.vfs(self.path)
265 266 self.opener = self.vfs
266 267 self.baseui = baseui
267 268 self.ui = baseui.copy()
268 269 self.ui.copy = baseui.copy # prevent copying repo configuration
269 270 # A list of callbacks to shape the phase if no data were found.
270 271 # Callbacks are in the form: func(repo, roots) --> processed root.
271 272 # This list is to be filled by extensions during repo setup
272 273 self._phasedefaults = []
273 274 try:
274 275 self.ui.readconfig(self.join("hgrc"), self.root)
275 276 self._loadextensions()
276 277 except IOError:
277 278 pass
278 279
279 280 if self.featuresetupfuncs:
280 281 self.supported = set(self._basesupported) # use private copy
281 282 extmods = set(m.__name__ for n, m
282 283 in extensions.extensions(self.ui))
283 284 for setupfunc in self.featuresetupfuncs:
284 285 if setupfunc.__module__ in extmods:
285 286 setupfunc(self.ui, self.supported)
286 287 else:
287 288 self.supported = self._basesupported
289 color.setup(self.ui)
288 290
289 291 # Add compression engines.
290 292 for name in util.compengines:
291 293 engine = util.compengines[name]
292 294 if engine.revlogheader():
293 295 self.supported.add('exp-compression-%s' % name)
294 296
295 297 if not self.vfs.isdir():
296 298 if create:
297 299 self.requirements = newreporequirements(self)
298 300
299 301 if not self.wvfs.exists():
300 302 self.wvfs.makedirs()
301 303 self.vfs.makedir(notindexed=True)
302 304
303 305 if 'store' in self.requirements:
304 306 self.vfs.mkdir("store")
305 307
306 308 # create an invalid changelog
307 309 self.vfs.append(
308 310 "00changelog.i",
309 311 '\0\0\0\2' # represents revlogv2
310 312 ' dummy changelog to prevent using the old repo layout'
311 313 )
312 314 else:
313 315 raise error.RepoError(_("repository %s not found") % path)
314 316 elif create:
315 317 raise error.RepoError(_("repository %s already exists") % path)
316 318 else:
317 319 try:
318 320 self.requirements = scmutil.readrequires(
319 321 self.vfs, self.supported)
320 322 except IOError as inst:
321 323 if inst.errno != errno.ENOENT:
322 324 raise
323 325
324 326 self.sharedpath = self.path
325 327 try:
326 328 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
327 329 realpath=True)
328 330 s = vfs.base
329 331 if not vfs.exists():
330 332 raise error.RepoError(
331 333 _('.hg/sharedpath points to nonexistent directory %s') % s)
332 334 self.sharedpath = s
333 335 except IOError as inst:
334 336 if inst.errno != errno.ENOENT:
335 337 raise
336 338
337 339 self.store = store.store(
338 340 self.requirements, self.sharedpath, scmutil.vfs)
339 341 self.spath = self.store.path
340 342 self.svfs = self.store.vfs
341 343 self.sjoin = self.store.join
342 344 self.vfs.createmode = self.store.createmode
343 345 self._applyopenerreqs()
344 346 if create:
345 347 self._writerequirements()
346 348
347 349 self._dirstatevalidatewarned = False
348 350
349 351 self._branchcaches = {}
350 352 self._revbranchcache = None
351 353 self.filterpats = {}
352 354 self._datafilters = {}
353 355 self._transref = self._lockref = self._wlockref = None
354 356
355 357 # A cache for various files under .hg/ that tracks file changes,
356 358 # (used by the filecache decorator)
357 359 #
358 360 # Maps a property name to its util.filecacheentry
359 361 self._filecache = {}
360 362
361 363 # holds sets of revisions to be filtered;
362 364 # should be cleared when something might have changed the filter value:
363 365 # - new changesets,
364 366 # - phase change,
365 367 # - new obsolescence marker,
366 368 # - working directory parent change,
367 369 # - bookmark changes
368 370 self.filteredrevcache = {}
369 371
370 372 # generic mapping between names and nodes
371 373 self.names = namespaces.namespaces()
372 374
373 375 def close(self):
374 376 self._writecaches()
375 377
376 378 def _loadextensions(self):
377 379 extensions.loadall(self.ui)
378 380
379 381 def _writecaches(self):
380 382 if self._revbranchcache:
381 383 self._revbranchcache.write()
382 384
383 385 def _restrictcapabilities(self, caps):
384 386 if self.ui.configbool('experimental', 'bundle2-advertise', True):
385 387 caps = set(caps)
386 388 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
387 389 caps.add('bundle2=' + urlreq.quote(capsblob))
388 390 return caps
389 391
390 392 def _applyopenerreqs(self):
391 393 self.svfs.options = dict((r, 1) for r in self.requirements
392 394 if r in self.openerreqs)
393 395 # experimental config: format.chunkcachesize
394 396 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
395 397 if chunkcachesize is not None:
396 398 self.svfs.options['chunkcachesize'] = chunkcachesize
397 399 # experimental config: format.maxchainlen
398 400 maxchainlen = self.ui.configint('format', 'maxchainlen')
399 401 if maxchainlen is not None:
400 402 self.svfs.options['maxchainlen'] = maxchainlen
401 403 # experimental config: format.manifestcachesize
402 404 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
403 405 if manifestcachesize is not None:
404 406 self.svfs.options['manifestcachesize'] = manifestcachesize
405 407 # experimental config: format.aggressivemergedeltas
406 408 aggressivemergedeltas = self.ui.configbool('format',
407 409 'aggressivemergedeltas', False)
408 410 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
409 411 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
410 412
411 413 for r in self.requirements:
412 414 if r.startswith('exp-compression-'):
413 415 self.svfs.options['compengine'] = r[len('exp-compression-'):]
414 416
415 417 def _writerequirements(self):
416 418 scmutil.writerequires(self.vfs, self.requirements)
417 419
418 420 def _checknested(self, path):
419 421 """Determine if path is a legal nested repository."""
420 422 if not path.startswith(self.root):
421 423 return False
422 424 subpath = path[len(self.root) + 1:]
423 425 normsubpath = util.pconvert(subpath)
424 426
425 427 # XXX: Checking against the current working copy is wrong in
426 428 # the sense that it can reject things like
427 429 #
428 430 # $ hg cat -r 10 sub/x.txt
429 431 #
430 432 # if sub/ is no longer a subrepository in the working copy
431 433 # parent revision.
432 434 #
433 435 # However, it can of course also allow things that would have
434 436 # been rejected before, such as the above cat command if sub/
435 437 # is a subrepository now, but was a normal directory before.
436 438 # The old path auditor would have rejected by mistake since it
437 439 # panics when it sees sub/.hg/.
438 440 #
439 441 # All in all, checking against the working copy seems sensible
440 442 # since we want to prevent access to nested repositories on
441 443 # the filesystem *now*.
442 444 ctx = self[None]
443 445 parts = util.splitpath(subpath)
444 446 while parts:
445 447 prefix = '/'.join(parts)
446 448 if prefix in ctx.substate:
447 449 if prefix == normsubpath:
448 450 return True
449 451 else:
450 452 sub = ctx.sub(prefix)
451 453 return sub.checknested(subpath[len(prefix) + 1:])
452 454 else:
453 455 parts.pop()
454 456 return False
455 457
456 458 def peer(self):
457 459 return localpeer(self) # not cached to avoid reference cycle
458 460
459 461 def unfiltered(self):
460 462 """Return unfiltered version of the repository
461 463
462 464 Intended to be overwritten by filtered repo."""
463 465 return self
464 466
465 467 def filtered(self, name):
466 468 """Return a filtered version of a repository"""
467 469 # build a new class with the mixin and the current class
468 470 # (possibly subclass of the repo)
469 471 class proxycls(repoview.repoview, self.unfiltered().__class__):
470 472 pass
471 473 return proxycls(self, name)
472 474
473 475 @repofilecache('bookmarks', 'bookmarks.current')
474 476 def _bookmarks(self):
475 477 return bookmarks.bmstore(self)
476 478
477 479 @property
478 480 def _activebookmark(self):
479 481 return self._bookmarks.active
480 482
481 483 def bookmarkheads(self, bookmark):
482 484 name = bookmark.split('@', 1)[0]
483 485 heads = []
484 486 for mark, n in self._bookmarks.iteritems():
485 487 if mark.split('@', 1)[0] == name:
486 488 heads.append(n)
487 489 return heads
488 490
489 491 # _phaserevs and _phasesets depend on changelog. What we need is to
490 492 # call _phasecache.invalidate() if '00changelog.i' was changed, but that
491 493 # can't be easily expressed in the filecache mechanism.
492 494 @storecache('phaseroots', '00changelog.i')
493 495 def _phasecache(self):
494 496 return phases.phasecache(self, self._phasedefaults)
495 497
496 498 @storecache('obsstore')
497 499 def obsstore(self):
498 500 # read default format for new obsstore.
499 501 # developer config: format.obsstore-version
500 502 defaultformat = self.ui.configint('format', 'obsstore-version', None)
501 503 # rely on obsstore class default when possible.
502 504 kwargs = {}
503 505 if defaultformat is not None:
504 506 kwargs['defaultformat'] = defaultformat
505 507 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
506 508 store = obsolete.obsstore(self.svfs, readonly=readonly,
507 509 **kwargs)
508 510 if store and readonly:
509 511 self.ui.warn(
510 512 _('obsolete feature not enabled but %i markers found!\n')
511 513 % len(list(store)))
512 514 return store
513 515
514 516 @storecache('00changelog.i')
515 517 def changelog(self):
516 518 c = changelog.changelog(self.svfs)
517 519 if txnutil.mayhavepending(self.root):
518 520 c.readpending('00changelog.i.a')
519 521 return c
520 522
521 523 def _constructmanifest(self):
522 524 # This is a temporary function while we migrate from manifest to
523 525 # manifestlog. It allows bundlerepo and unionrepo to intercept the
524 526 # manifest creation.
525 527 return manifest.manifestrevlog(self.svfs)
526 528
527 529 @storecache('00manifest.i')
528 530 def manifestlog(self):
529 531 return manifest.manifestlog(self.svfs, self)
530 532
531 533 @repofilecache('dirstate')
532 534 def dirstate(self):
533 535 return dirstate.dirstate(self.vfs, self.ui, self.root,
534 536 self._dirstatevalidate)
535 537
536 538 def _dirstatevalidate(self, node):
537 539 try:
538 540 self.changelog.rev(node)
539 541 return node
540 542 except error.LookupError:
541 543 if not self._dirstatevalidatewarned:
542 544 self._dirstatevalidatewarned = True
543 545 self.ui.warn(_("warning: ignoring unknown"
544 546 " working parent %s!\n") % short(node))
545 547 return nullid
546 548
547 549 def __getitem__(self, changeid):
548 550 if changeid is None or changeid == wdirrev:
549 551 return context.workingctx(self)
550 552 if isinstance(changeid, slice):
551 553 return [context.changectx(self, i)
552 554 for i in xrange(*changeid.indices(len(self)))
553 555 if i not in self.changelog.filteredrevs]
554 556 return context.changectx(self, changeid)
555 557
556 558 def __contains__(self, changeid):
557 559 try:
558 560 self[changeid]
559 561 return True
560 562 except error.RepoLookupError:
561 563 return False
562 564
563 565 def __nonzero__(self):
564 566 return True
565 567
566 568 def __len__(self):
567 569 return len(self.changelog)
568 570
569 571 def __iter__(self):
570 572 return iter(self.changelog)
571 573
572 574 def revs(self, expr, *args):
573 575 '''Find revisions matching a revset.
574 576
575 577 The revset is specified as a string ``expr`` that may contain
576 578 %-formatting to escape certain types. See ``revsetlang.formatspec``.
577 579
578 580 Revset aliases from the configuration are not expanded. To expand
579 581 user aliases, consider calling ``scmutil.revrange()`` or
580 582 ``repo.anyrevs([expr], user=True)``.
581 583
582 584 Returns a revset.abstractsmartset, which is a list-like interface
583 585 that contains integer revisions.
584 586 '''
585 587 expr = revsetlang.formatspec(expr, *args)
586 588 m = revset.match(None, expr)
587 589 return m(self)
588 590
589 591 def set(self, expr, *args):
590 592 '''Find revisions matching a revset and emit changectx instances.
591 593
592 594 This is a convenience wrapper around ``revs()`` that iterates the
593 595 result and is a generator of changectx instances.
594 596
595 597 Revset aliases from the configuration are not expanded. To expand
596 598 user aliases, consider calling ``scmutil.revrange()``.
597 599 '''
598 600 for r in self.revs(expr, *args):
599 601 yield self[r]
600 602
601 603 def anyrevs(self, specs, user=False):
602 604 '''Find revisions matching one of the given revsets.
603 605
604 606 Revset aliases from the configuration are not expanded by default. To
605 607 expand user aliases, specify ``user=True``.
606 608 '''
607 609 if user:
608 610 m = revset.matchany(self.ui, specs, repo=self)
609 611 else:
610 612 m = revset.matchany(None, specs)
611 613 return m(self)
612 614
613 615 def url(self):
614 616 return 'file:' + self.root
615 617
616 618 def hook(self, name, throw=False, **args):
617 619 """Call a hook, passing this repo instance.
618 620
619 621 This a convenience method to aid invoking hooks. Extensions likely
620 622 won't call this unless they have registered a custom hook or are
621 623 replacing code that is expected to call a hook.
622 624 """
623 625 return hook.hook(self.ui, self, name, throw, **args)
624 626
625 627 @unfilteredmethod
626 628 def _tag(self, names, node, message, local, user, date, extra=None,
627 629 editor=False):
628 630 if isinstance(names, str):
629 631 names = (names,)
630 632
631 633 branches = self.branchmap()
632 634 for name in names:
633 635 self.hook('pretag', throw=True, node=hex(node), tag=name,
634 636 local=local)
635 637 if name in branches:
636 638 self.ui.warn(_("warning: tag %s conflicts with existing"
637 639 " branch name\n") % name)
638 640
639 641 def writetags(fp, names, munge, prevtags):
640 642 fp.seek(0, 2)
641 643 if prevtags and prevtags[-1] != '\n':
642 644 fp.write('\n')
643 645 for name in names:
644 646 if munge:
645 647 m = munge(name)
646 648 else:
647 649 m = name
648 650
649 651 if (self._tagscache.tagtypes and
650 652 name in self._tagscache.tagtypes):
651 653 old = self.tags().get(name, nullid)
652 654 fp.write('%s %s\n' % (hex(old), m))
653 655 fp.write('%s %s\n' % (hex(node), m))
654 656 fp.close()
655 657
656 658 prevtags = ''
657 659 if local:
658 660 try:
659 661 fp = self.vfs('localtags', 'r+')
660 662 except IOError:
661 663 fp = self.vfs('localtags', 'a')
662 664 else:
663 665 prevtags = fp.read()
664 666
665 667 # local tags are stored in the current charset
666 668 writetags(fp, names, None, prevtags)
667 669 for name in names:
668 670 self.hook('tag', node=hex(node), tag=name, local=local)
669 671 return
670 672
671 673 try:
672 674 fp = self.wfile('.hgtags', 'rb+')
673 675 except IOError as e:
674 676 if e.errno != errno.ENOENT:
675 677 raise
676 678 fp = self.wfile('.hgtags', 'ab')
677 679 else:
678 680 prevtags = fp.read()
679 681
680 682 # committed tags are stored in UTF-8
681 683 writetags(fp, names, encoding.fromlocal, prevtags)
682 684
683 685 fp.close()
684 686
685 687 self.invalidatecaches()
686 688
687 689 if '.hgtags' not in self.dirstate:
688 690 self[None].add(['.hgtags'])
689 691
690 692 m = matchmod.exact(self.root, '', ['.hgtags'])
691 693 tagnode = self.commit(message, user, date, extra=extra, match=m,
692 694 editor=editor)
693 695
694 696 for name in names:
695 697 self.hook('tag', node=hex(node), tag=name, local=local)
696 698
697 699 return tagnode
698 700
699 701 def tag(self, names, node, message, local, user, date, editor=False):
700 702 '''tag a revision with one or more symbolic names.
701 703
702 704 names is a list of strings or, when adding a single tag, names may be a
703 705 string.
704 706
705 707 if local is True, the tags are stored in a per-repository file.
706 708 otherwise, they are stored in the .hgtags file, and a new
707 709 changeset is committed with the change.
708 710
709 711 keyword arguments:
710 712
711 713 local: whether to store tags in non-version-controlled file
712 714 (default False)
713 715
714 716 message: commit message to use if committing
715 717
716 718 user: name of user to use if committing
717 719
718 720 date: date tuple to use if committing'''
719 721
720 722 if not local:
721 723 m = matchmod.exact(self.root, '', ['.hgtags'])
722 724 if any(self.status(match=m, unknown=True, ignored=True)):
723 725 raise error.Abort(_('working copy of .hgtags is changed'),
724 726 hint=_('please commit .hgtags manually'))
725 727
726 728 self.tags() # instantiate the cache
727 729 self._tag(names, node, message, local, user, date, editor=editor)
728 730
729 731 @filteredpropertycache
730 732 def _tagscache(self):
731 733 '''Returns a tagscache object that contains various tags related
732 734 caches.'''
733 735
734 736 # This simplifies its cache management by having one decorated
735 737 # function (this one) and the rest simply fetch things from it.
736 738 class tagscache(object):
737 739 def __init__(self):
738 740 # These two define the set of tags for this repository. tags
739 741 # maps tag name to node; tagtypes maps tag name to 'global' or
740 742 # 'local'. (Global tags are defined by .hgtags across all
741 743 # heads, and local tags are defined in .hg/localtags.)
742 744 # They constitute the in-memory cache of tags.
743 745 self.tags = self.tagtypes = None
744 746
745 747 self.nodetagscache = self.tagslist = None
746 748
747 749 cache = tagscache()
748 750 cache.tags, cache.tagtypes = self._findtags()
749 751
750 752 return cache
751 753
752 754 def tags(self):
753 755 '''return a mapping of tag to node'''
754 756 t = {}
755 757 if self.changelog.filteredrevs:
756 758 tags, tt = self._findtags()
757 759 else:
758 760 tags = self._tagscache.tags
759 761 for k, v in tags.iteritems():
760 762 try:
761 763 # ignore tags to unknown nodes
762 764 self.changelog.rev(v)
763 765 t[k] = v
764 766 except (error.LookupError, ValueError):
765 767 pass
766 768 return t
767 769
768 770 def _findtags(self):
769 771 '''Do the hard work of finding tags. Return a pair of dicts
770 772 (tags, tagtypes) where tags maps tag name to node, and tagtypes
771 773 maps tag name to a string like \'global\' or \'local\'.
772 774 Subclasses or extensions are free to add their own tags, but
773 775 should be aware that the returned dicts will be retained for the
774 776 duration of the localrepo object.'''
775 777
776 778 # XXX what tagtype should subclasses/extensions use? Currently
777 779 # mq and bookmarks add tags, but do not set the tagtype at all.
778 780 # Should each extension invent its own tag type? Should there
779 781 # be one tagtype for all such "virtual" tags? Or is the status
780 782 # quo fine?
781 783
782 784 alltags = {} # map tag name to (node, hist)
783 785 tagtypes = {}
784 786
785 787 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
786 788 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
787 789
788 790 # Build the return dicts. Have to re-encode tag names because
789 791 # the tags module always uses UTF-8 (in order not to lose info
790 792 # writing to the cache), but the rest of Mercurial wants them in
791 793 # local encoding.
792 794 tags = {}
793 795 for (name, (node, hist)) in alltags.iteritems():
794 796 if node != nullid:
795 797 tags[encoding.tolocal(name)] = node
796 798 tags['tip'] = self.changelog.tip()
797 799 tagtypes = dict([(encoding.tolocal(name), value)
798 800 for (name, value) in tagtypes.iteritems()])
799 801 return (tags, tagtypes)
800 802
801 803 def tagtype(self, tagname):
802 804 '''
803 805 return the type of the given tag. result can be:
804 806
805 807 'local' : a local tag
806 808 'global' : a global tag
807 809 None : tag does not exist
808 810 '''
809 811
810 812 return self._tagscache.tagtypes.get(tagname)
811 813
812 814 def tagslist(self):
813 815 '''return a list of tags ordered by revision'''
814 816 if not self._tagscache.tagslist:
815 817 l = []
816 818 for t, n in self.tags().iteritems():
817 819 l.append((self.changelog.rev(n), t, n))
818 820 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
819 821
820 822 return self._tagscache.tagslist
821 823
822 824 def nodetags(self, node):
823 825 '''return the tags associated with a node'''
824 826 if not self._tagscache.nodetagscache:
825 827 nodetagscache = {}
826 828 for t, n in self._tagscache.tags.iteritems():
827 829 nodetagscache.setdefault(n, []).append(t)
828 830 for tags in nodetagscache.itervalues():
829 831 tags.sort()
830 832 self._tagscache.nodetagscache = nodetagscache
831 833 return self._tagscache.nodetagscache.get(node, [])
832 834
833 835 def nodebookmarks(self, node):
834 836 """return the list of bookmarks pointing to the specified node"""
835 837 marks = []
836 838 for bookmark, n in self._bookmarks.iteritems():
837 839 if n == node:
838 840 marks.append(bookmark)
839 841 return sorted(marks)
840 842
841 843 def branchmap(self):
842 844 '''returns a dictionary {branch: [branchheads]} with branchheads
843 845 ordered by increasing revision number'''
844 846 branchmap.updatecache(self)
845 847 return self._branchcaches[self.filtername]
846 848
847 849 @unfilteredmethod
848 850 def revbranchcache(self):
849 851 if not self._revbranchcache:
850 852 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
851 853 return self._revbranchcache
852 854
853 855 def branchtip(self, branch, ignoremissing=False):
854 856 '''return the tip node for a given branch
855 857
856 858 If ignoremissing is True, then this method will not raise an error.
857 859 This is helpful for callers that only expect None for a missing branch
858 860 (e.g. namespace).
859 861
860 862 '''
861 863 try:
862 864 return self.branchmap().branchtip(branch)
863 865 except KeyError:
864 866 if not ignoremissing:
865 867 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
866 868 else:
867 869 pass
868 870
869 871 def lookup(self, key):
870 872 return self[key].node()
871 873
872 874 def lookupbranch(self, key, remote=None):
873 875 repo = remote or self
874 876 if key in repo.branchmap():
875 877 return key
876 878
877 879 repo = (remote and remote.local()) and remote or self
878 880 return repo[key].branch()
879 881
880 882 def known(self, nodes):
881 883 cl = self.changelog
882 884 nm = cl.nodemap
883 885 filtered = cl.filteredrevs
884 886 result = []
885 887 for n in nodes:
886 888 r = nm.get(n)
887 889 resp = not (r is None or r in filtered)
888 890 result.append(resp)
889 891 return result
890 892
891 893 def local(self):
892 894 return self
893 895
894 896 def publishing(self):
895 897 # it's safe (and desirable) to trust the publish flag unconditionally
896 898 # so that we don't finalize changes shared between users via ssh or nfs
897 899 return self.ui.configbool('phases', 'publish', True, untrusted=True)
898 900
899 901 def cancopy(self):
900 902 # so statichttprepo's override of local() works
901 903 if not self.local():
902 904 return False
903 905 if not self.publishing():
904 906 return True
905 907 # if publishing we can't copy if there is filtered content
906 908 return not self.filtered('visible').changelog.filteredrevs
907 909
908 910 def shared(self):
909 911 '''the type of shared repository (None if not shared)'''
910 912 if self.sharedpath != self.path:
911 913 return 'store'
912 914 return None
913 915
914 916 def join(self, f, *insidef):
915 917 return self.vfs.join(os.path.join(f, *insidef))
916 918
917 919 def wjoin(self, f, *insidef):
918 920 return self.vfs.reljoin(self.root, f, *insidef)
919 921
920 922 def file(self, f):
921 923 if f[0] == '/':
922 924 f = f[1:]
923 925 return filelog.filelog(self.svfs, f)
924 926
925 927 def changectx(self, changeid):
926 928 return self[changeid]
927 929
928 930 def setparents(self, p1, p2=nullid):
929 931 self.dirstate.beginparentchange()
930 932 copies = self.dirstate.setparents(p1, p2)
931 933 pctx = self[p1]
932 934 if copies:
933 935 # Adjust copy records, the dirstate cannot do it, it
934 936 # requires access to parents manifests. Preserve them
935 937 # only for entries added to first parent.
936 938 for f in copies:
937 939 if f not in pctx and copies[f] in pctx:
938 940 self.dirstate.copy(copies[f], f)
939 941 if p2 == nullid:
940 942 for f, s in sorted(self.dirstate.copies().items()):
941 943 if f not in pctx and s not in pctx:
942 944 self.dirstate.copy(None, f)
943 945 self.dirstate.endparentchange()
944 946
945 947 def filectx(self, path, changeid=None, fileid=None):
946 948 """changeid can be a changeset revision, node, or tag.
947 949 fileid can be a file revision or node."""
948 950 return context.filectx(self, path, changeid, fileid)
949 951
950 952 def getcwd(self):
951 953 return self.dirstate.getcwd()
952 954
953 955 def pathto(self, f, cwd=None):
954 956 return self.dirstate.pathto(f, cwd)
955 957
956 958 def wfile(self, f, mode='r'):
957 959 return self.wvfs(f, mode)
958 960
959 961 def _link(self, f):
960 962 return self.wvfs.islink(f)
961 963
962 964 def _loadfilter(self, filter):
963 965 if filter not in self.filterpats:
964 966 l = []
965 967 for pat, cmd in self.ui.configitems(filter):
966 968 if cmd == '!':
967 969 continue
968 970 mf = matchmod.match(self.root, '', [pat])
969 971 fn = None
970 972 params = cmd
971 973 for name, filterfn in self._datafilters.iteritems():
972 974 if cmd.startswith(name):
973 975 fn = filterfn
974 976 params = cmd[len(name):].lstrip()
975 977 break
976 978 if not fn:
977 979 fn = lambda s, c, **kwargs: util.filter(s, c)
978 980 # Wrap old filters not supporting keyword arguments
979 981 if not inspect.getargspec(fn)[2]:
980 982 oldfn = fn
981 983 fn = lambda s, c, **kwargs: oldfn(s, c)
982 984 l.append((mf, fn, params))
983 985 self.filterpats[filter] = l
984 986 return self.filterpats[filter]
985 987
986 988 def _filter(self, filterpats, filename, data):
987 989 for mf, fn, cmd in filterpats:
988 990 if mf(filename):
989 991 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
990 992 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
991 993 break
992 994
993 995 return data
994 996
995 997 @unfilteredpropertycache
996 998 def _encodefilterpats(self):
997 999 return self._loadfilter('encode')
998 1000
999 1001 @unfilteredpropertycache
1000 1002 def _decodefilterpats(self):
1001 1003 return self._loadfilter('decode')
1002 1004
1003 1005 def adddatafilter(self, name, filter):
1004 1006 self._datafilters[name] = filter
1005 1007
1006 1008 def wread(self, filename):
1007 1009 if self._link(filename):
1008 1010 data = self.wvfs.readlink(filename)
1009 1011 else:
1010 1012 data = self.wvfs.read(filename)
1011 1013 return self._filter(self._encodefilterpats, filename, data)
1012 1014
1013 1015 def wwrite(self, filename, data, flags, backgroundclose=False):
1014 1016 """write ``data`` into ``filename`` in the working directory
1015 1017
1016 1018 This returns length of written (maybe decoded) data.
1017 1019 """
1018 1020 data = self._filter(self._decodefilterpats, filename, data)
1019 1021 if 'l' in flags:
1020 1022 self.wvfs.symlink(data, filename)
1021 1023 else:
1022 1024 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1023 1025 if 'x' in flags:
1024 1026 self.wvfs.setflags(filename, False, True)
1025 1027 return len(data)
1026 1028
1027 1029 def wwritedata(self, filename, data):
1028 1030 return self._filter(self._decodefilterpats, filename, data)
1029 1031
1030 1032 def currenttransaction(self):
1031 1033 """return the current transaction or None if non exists"""
1032 1034 if self._transref:
1033 1035 tr = self._transref()
1034 1036 else:
1035 1037 tr = None
1036 1038
1037 1039 if tr and tr.running():
1038 1040 return tr
1039 1041 return None
1040 1042
1041 1043 def transaction(self, desc, report=None):
1042 1044 if (self.ui.configbool('devel', 'all-warnings')
1043 1045 or self.ui.configbool('devel', 'check-locks')):
1044 1046 if self._currentlock(self._lockref) is None:
1045 1047 raise error.ProgrammingError('transaction requires locking')
1046 1048 tr = self.currenttransaction()
1047 1049 if tr is not None:
1048 1050 return tr.nest()
1049 1051
1050 1052 # abort here if the journal already exists
1051 1053 if self.svfs.exists("journal"):
1052 1054 raise error.RepoError(
1053 1055 _("abandoned transaction found"),
1054 1056 hint=_("run 'hg recover' to clean up transaction"))
1055 1057
1056 1058 idbase = "%.40f#%f" % (random.random(), time.time())
1057 1059 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1058 1060 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1059 1061
1060 1062 self._writejournal(desc)
1061 1063 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1062 1064 if report:
1063 1065 rp = report
1064 1066 else:
1065 1067 rp = self.ui.warn
1066 1068 vfsmap = {'plain': self.vfs} # root of .hg/
1067 1069 # we must avoid a cyclic reference between repo and transaction.
1068 1070 reporef = weakref.ref(self)
1069 1071 def validate(tr):
1070 1072 """will run pre-closing hooks"""
1071 1073 reporef().hook('pretxnclose', throw=True,
1072 1074 txnname=desc, **tr.hookargs)
1073 1075 def releasefn(tr, success):
1074 1076 repo = reporef()
1075 1077 if success:
1076 1078 # this should be explicitly invoked here, because
1077 1079 # in-memory changes aren't written out when closing the
1078 1080 # transaction if tr.addfilegenerator (via
1079 1081 # dirstate.write or so) wasn't invoked while the
1080 1082 # transaction was running
1081 1083 repo.dirstate.write(None)
1082 1084 else:
1083 1085 # discard all changes (including ones already written
1084 1086 # out) in this transaction
1085 1087 repo.dirstate.restorebackup(None, prefix='journal.')
1086 1088
1087 1089 repo.invalidate(clearfilecache=True)
1088 1090
1089 1091 tr = transaction.transaction(rp, self.svfs, vfsmap,
1090 1092 "journal",
1091 1093 "undo",
1092 1094 aftertrans(renames),
1093 1095 self.store.createmode,
1094 1096 validator=validate,
1095 1097 releasefn=releasefn)
1096 1098
1097 1099 tr.hookargs['txnid'] = txnid
1098 1100 # note: writing the fncache only during finalize means that the file is
1099 1101 # outdated when running hooks. As fncache is used for streaming clones,
1100 1102 # this is not expected to break anything that happens during the hooks.
1101 1103 tr.addfinalize('flush-fncache', self.store.write)
1102 1104 def txnclosehook(tr2):
1103 1105 """To be run if transaction is successful, will schedule a hook run
1104 1106 """
1105 1107 # Don't reference tr2 in hook() so we don't hold a reference.
1106 1108 # This reduces memory consumption when there are multiple
1107 1109 # transactions per lock. This can likely go away if issue5045
1108 1110 # fixes the function accumulation.
1109 1111 hookargs = tr2.hookargs
1110 1112
1111 1113 def hook():
1112 1114 reporef().hook('txnclose', throw=False, txnname=desc,
1113 1115 **hookargs)
1114 1116 reporef()._afterlock(hook)
1115 1117 tr.addfinalize('txnclose-hook', txnclosehook)
1116 1118 def txnaborthook(tr2):
1117 1119 """To be run if transaction is aborted
1118 1120 """
1119 1121 reporef().hook('txnabort', throw=False, txnname=desc,
1120 1122 **tr2.hookargs)
1121 1123 tr.addabort('txnabort-hook', txnaborthook)
1122 1124 # avoid eager cache invalidation. in-memory data should be identical
1123 1125 # to stored data if transaction has no error.
1124 1126 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1125 1127 self._transref = weakref.ref(tr)
1126 1128 return tr
1127 1129
1128 1130 def _journalfiles(self):
1129 1131 return ((self.svfs, 'journal'),
1130 1132 (self.vfs, 'journal.dirstate'),
1131 1133 (self.vfs, 'journal.branch'),
1132 1134 (self.vfs, 'journal.desc'),
1133 1135 (self.vfs, 'journal.bookmarks'),
1134 1136 (self.svfs, 'journal.phaseroots'))
1135 1137
1136 1138 def undofiles(self):
1137 1139 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1138 1140
1139 1141 def _writejournal(self, desc):
1140 1142 self.dirstate.savebackup(None, prefix='journal.')
1141 1143 self.vfs.write("journal.branch",
1142 1144 encoding.fromlocal(self.dirstate.branch()))
1143 1145 self.vfs.write("journal.desc",
1144 1146 "%d\n%s\n" % (len(self), desc))
1145 1147 self.vfs.write("journal.bookmarks",
1146 1148 self.vfs.tryread("bookmarks"))
1147 1149 self.svfs.write("journal.phaseroots",
1148 1150 self.svfs.tryread("phaseroots"))
1149 1151
1150 1152 def recover(self):
1151 1153 with self.lock():
1152 1154 if self.svfs.exists("journal"):
1153 1155 self.ui.status(_("rolling back interrupted transaction\n"))
1154 1156 vfsmap = {'': self.svfs,
1155 1157 'plain': self.vfs,}
1156 1158 transaction.rollback(self.svfs, vfsmap, "journal",
1157 1159 self.ui.warn)
1158 1160 self.invalidate()
1159 1161 return True
1160 1162 else:
1161 1163 self.ui.warn(_("no interrupted transaction available\n"))
1162 1164 return False
1163 1165
1164 1166 def rollback(self, dryrun=False, force=False):
1165 1167 wlock = lock = dsguard = None
1166 1168 try:
1167 1169 wlock = self.wlock()
1168 1170 lock = self.lock()
1169 1171 if self.svfs.exists("undo"):
1170 1172 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1171 1173
1172 1174 return self._rollback(dryrun, force, dsguard)
1173 1175 else:
1174 1176 self.ui.warn(_("no rollback information available\n"))
1175 1177 return 1
1176 1178 finally:
1177 1179 release(dsguard, lock, wlock)
1178 1180
1179 1181 @unfilteredmethod # Until we get smarter cache management
1180 1182 def _rollback(self, dryrun, force, dsguard):
1181 1183 ui = self.ui
1182 1184 try:
1183 1185 args = self.vfs.read('undo.desc').splitlines()
1184 1186 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1185 1187 if len(args) >= 3:
1186 1188 detail = args[2]
1187 1189 oldtip = oldlen - 1
1188 1190
1189 1191 if detail and ui.verbose:
1190 1192 msg = (_('repository tip rolled back to revision %s'
1191 1193 ' (undo %s: %s)\n')
1192 1194 % (oldtip, desc, detail))
1193 1195 else:
1194 1196 msg = (_('repository tip rolled back to revision %s'
1195 1197 ' (undo %s)\n')
1196 1198 % (oldtip, desc))
1197 1199 except IOError:
1198 1200 msg = _('rolling back unknown transaction\n')
1199 1201 desc = None
1200 1202
1201 1203 if not force and self['.'] != self['tip'] and desc == 'commit':
1202 1204 raise error.Abort(
1203 1205 _('rollback of last commit while not checked out '
1204 1206 'may lose data'), hint=_('use -f to force'))
1205 1207
1206 1208 ui.status(msg)
1207 1209 if dryrun:
1208 1210 return 0
1209 1211
1210 1212 parents = self.dirstate.parents()
1211 1213 self.destroying()
1212 1214 vfsmap = {'plain': self.vfs, '': self.svfs}
1213 1215 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1214 1216 if self.vfs.exists('undo.bookmarks'):
1215 1217 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1216 1218 if self.svfs.exists('undo.phaseroots'):
1217 1219 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1218 1220 self.invalidate()
1219 1221
1220 1222 parentgone = (parents[0] not in self.changelog.nodemap or
1221 1223 parents[1] not in self.changelog.nodemap)
1222 1224 if parentgone:
1223 1225 # prevent dirstateguard from overwriting already restored one
1224 1226 dsguard.close()
1225 1227
1226 1228 self.dirstate.restorebackup(None, prefix='undo.')
1227 1229 try:
1228 1230 branch = self.vfs.read('undo.branch')
1229 1231 self.dirstate.setbranch(encoding.tolocal(branch))
1230 1232 except IOError:
1231 1233 ui.warn(_('named branch could not be reset: '
1232 1234 'current branch is still \'%s\'\n')
1233 1235 % self.dirstate.branch())
1234 1236
1235 1237 parents = tuple([p.rev() for p in self[None].parents()])
1236 1238 if len(parents) > 1:
1237 1239 ui.status(_('working directory now based on '
1238 1240 'revisions %d and %d\n') % parents)
1239 1241 else:
1240 1242 ui.status(_('working directory now based on '
1241 1243 'revision %d\n') % parents)
1242 1244 mergemod.mergestate.clean(self, self['.'].node())
1243 1245
1244 1246 # TODO: if we know which new heads may result from this rollback, pass
1245 1247 # them to destroy(), which will prevent the branchhead cache from being
1246 1248 # invalidated.
1247 1249 self.destroyed()
1248 1250 return 0
1249 1251
1250 1252 def invalidatecaches(self):
1251 1253
1252 1254 if '_tagscache' in vars(self):
1253 1255 # can't use delattr on proxy
1254 1256 del self.__dict__['_tagscache']
1255 1257
1256 1258 self.unfiltered()._branchcaches.clear()
1257 1259 self.invalidatevolatilesets()
1258 1260
1259 1261 def invalidatevolatilesets(self):
1260 1262 self.filteredrevcache.clear()
1261 1263 obsolete.clearobscaches(self)
1262 1264
1263 1265 def invalidatedirstate(self):
1264 1266 '''Invalidates the dirstate, causing the next call to dirstate
1265 1267 to check if it was modified since the last time it was read,
1266 1268 rereading it if it has.
1267 1269
1268 1270 This is different from dirstate.invalidate() in that it doesn't always
1269 1271 reread the dirstate. Use dirstate.invalidate() if you want to
1270 1272 explicitly read the dirstate again (i.e. restoring it to a previous
1271 1273 known good state).'''
1272 1274 if hasunfilteredcache(self, 'dirstate'):
1273 1275 for k in self.dirstate._filecache:
1274 1276 try:
1275 1277 delattr(self.dirstate, k)
1276 1278 except AttributeError:
1277 1279 pass
1278 1280 delattr(self.unfiltered(), 'dirstate')
1279 1281
1280 1282 def invalidate(self, clearfilecache=False):
1281 1283 '''Invalidates both store and non-store parts other than dirstate
1282 1284
1283 1285 If a transaction is running, invalidation of store is omitted,
1284 1286 because discarding in-memory changes might cause inconsistency
1285 1287 (e.g. incomplete fncache causes unintentional failure, but
1286 1288 redundant one doesn't).
1287 1289 '''
1288 1290 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1289 1291 for k in self._filecache.keys():
1290 1292 # dirstate is invalidated separately in invalidatedirstate()
1291 1293 if k == 'dirstate':
1292 1294 continue
1293 1295
1294 1296 if clearfilecache:
1295 1297 del self._filecache[k]
1296 1298 try:
1297 1299 delattr(unfiltered, k)
1298 1300 except AttributeError:
1299 1301 pass
1300 1302 self.invalidatecaches()
1301 1303 if not self.currenttransaction():
1302 1304 # TODO: Changing contents of store outside transaction
1303 1305 # causes inconsistency. We should make in-memory store
1304 1306 # changes detectable, and abort if changed.
1305 1307 self.store.invalidatecaches()
1306 1308
1307 1309 def invalidateall(self):
1308 1310 '''Fully invalidates both store and non-store parts, causing the
1309 1311 subsequent operation to reread any outside changes.'''
1310 1312 # extensions should hook this to invalidate their caches
1311 1313 self.invalidate()
1312 1314 self.invalidatedirstate()
1313 1315
1314 1316 @unfilteredmethod
1315 1317 def _refreshfilecachestats(self, tr):
1316 1318 """Reload stats of cached files so that they are flagged as valid"""
1317 1319 for k, ce in self._filecache.items():
1318 1320 if k == 'dirstate' or k not in self.__dict__:
1319 1321 continue
1320 1322 ce.refresh()
1321 1323
1322 1324 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1323 1325 inheritchecker=None, parentenvvar=None):
1324 1326 parentlock = None
1325 1327 # the contents of parentenvvar are used by the underlying lock to
1326 1328 # determine whether it can be inherited
1327 1329 if parentenvvar is not None:
1328 1330 parentlock = encoding.environ.get(parentenvvar)
1329 1331 try:
1330 1332 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1331 1333 acquirefn=acquirefn, desc=desc,
1332 1334 inheritchecker=inheritchecker,
1333 1335 parentlock=parentlock)
1334 1336 except error.LockHeld as inst:
1335 1337 if not wait:
1336 1338 raise
1337 1339 # show more details for new-style locks
1338 1340 if ':' in inst.locker:
1339 1341 host, pid = inst.locker.split(":", 1)
1340 1342 self.ui.warn(
1341 1343 _("waiting for lock on %s held by process %r "
1342 1344 "on host %r\n") % (desc, pid, host))
1343 1345 else:
1344 1346 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1345 1347 (desc, inst.locker))
1346 1348 # default to 600 seconds timeout
1347 1349 l = lockmod.lock(vfs, lockname,
1348 1350 int(self.ui.config("ui", "timeout", "600")),
1349 1351 releasefn=releasefn, acquirefn=acquirefn,
1350 1352 desc=desc)
1351 1353 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1352 1354 return l
1353 1355
1354 1356 def _afterlock(self, callback):
1355 1357 """add a callback to be run when the repository is fully unlocked
1356 1358
1357 1359 The callback will be executed when the outermost lock is released
1358 1360 (with wlock being higher level than 'lock')."""
1359 1361 for ref in (self._wlockref, self._lockref):
1360 1362 l = ref and ref()
1361 1363 if l and l.held:
1362 1364 l.postrelease.append(callback)
1363 1365 break
1364 1366 else: # no lock has been found.
1365 1367 callback()
1366 1368
1367 1369 def lock(self, wait=True):
1368 1370 '''Lock the repository store (.hg/store) and return a weak reference
1369 1371 to the lock. Use this before modifying the store (e.g. committing or
1370 1372 stripping). If you are opening a transaction, get a lock as well.
1371 1373
1372 1374 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1373 1375 'wlock' first to avoid a deadlock hazard.'''
1374 1376 l = self._currentlock(self._lockref)
1375 1377 if l is not None:
1376 1378 l.lock()
1377 1379 return l
1378 1380
1379 1381 l = self._lock(self.svfs, "lock", wait, None,
1380 1382 self.invalidate, _('repository %s') % self.origroot)
1381 1383 self._lockref = weakref.ref(l)
1382 1384 return l
1383 1385
1384 1386 def _wlockchecktransaction(self):
1385 1387 if self.currenttransaction() is not None:
1386 1388 raise error.LockInheritanceContractViolation(
1387 1389 'wlock cannot be inherited in the middle of a transaction')
1388 1390
1389 1391 def wlock(self, wait=True):
1390 1392 '''Lock the non-store parts of the repository (everything under
1391 1393 .hg except .hg/store) and return a weak reference to the lock.
1392 1394
1393 1395 Use this before modifying files in .hg.
1394 1396
1395 1397 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1396 1398 'wlock' first to avoid a deadlock hazard.'''
1397 1399 l = self._wlockref and self._wlockref()
1398 1400 if l is not None and l.held:
1399 1401 l.lock()
1400 1402 return l
1401 1403
1402 1404 # We do not need to check for non-waiting lock acquisition. Such
1403 1405 # acquisitions would not cause deadlock as they would just fail.
1404 1406 if wait and (self.ui.configbool('devel', 'all-warnings')
1405 1407 or self.ui.configbool('devel', 'check-locks')):
1406 1408 if self._currentlock(self._lockref) is not None:
1407 1409 self.ui.develwarn('"wlock" acquired after "lock"')
1408 1410
1409 1411 def unlock():
1410 1412 if self.dirstate.pendingparentchange():
1411 1413 self.dirstate.invalidate()
1412 1414 else:
1413 1415 self.dirstate.write(None)
1414 1416
1415 1417 self._filecache['dirstate'].refresh()
1416 1418
1417 1419 l = self._lock(self.vfs, "wlock", wait, unlock,
1418 1420 self.invalidatedirstate, _('working directory of %s') %
1419 1421 self.origroot,
1420 1422 inheritchecker=self._wlockchecktransaction,
1421 1423 parentenvvar='HG_WLOCK_LOCKER')
1422 1424 self._wlockref = weakref.ref(l)
1423 1425 return l
1424 1426
1425 1427 def _currentlock(self, lockref):
1426 1428 """Returns the lock if it's held, or None if it's not."""
1427 1429 if lockref is None:
1428 1430 return None
1429 1431 l = lockref()
1430 1432 if l is None or not l.held:
1431 1433 return None
1432 1434 return l
1433 1435
1434 1436 def currentwlock(self):
1435 1437 """Returns the wlock if it's held, or None if it's not."""
1436 1438 return self._currentlock(self._wlockref)
1437 1439
1438 1440 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1439 1441 """
1440 1442 commit an individual file as part of a larger transaction
1441 1443 """
1442 1444
1443 1445 fname = fctx.path()
1444 1446 fparent1 = manifest1.get(fname, nullid)
1445 1447 fparent2 = manifest2.get(fname, nullid)
1446 1448 if isinstance(fctx, context.filectx):
1447 1449 node = fctx.filenode()
1448 1450 if node in [fparent1, fparent2]:
1449 1451 self.ui.debug('reusing %s filelog entry\n' % fname)
1450 1452 if manifest1.flags(fname) != fctx.flags():
1451 1453 changelist.append(fname)
1452 1454 return node
1453 1455
1454 1456 flog = self.file(fname)
1455 1457 meta = {}
1456 1458 copy = fctx.renamed()
1457 1459 if copy and copy[0] != fname:
1458 1460 # Mark the new revision of this file as a copy of another
1459 1461 # file. This copy data will effectively act as a parent
1460 1462 # of this new revision. If this is a merge, the first
1461 1463 # parent will be the nullid (meaning "look up the copy data")
1462 1464 # and the second one will be the other parent. For example:
1463 1465 #
1464 1466 # 0 --- 1 --- 3 rev1 changes file foo
1465 1467 # \ / rev2 renames foo to bar and changes it
1466 1468 # \- 2 -/ rev3 should have bar with all changes and
1467 1469 # should record that bar descends from
1468 1470 # bar in rev2 and foo in rev1
1469 1471 #
1470 1472 # this allows this merge to succeed:
1471 1473 #
1472 1474 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1473 1475 # \ / merging rev3 and rev4 should use bar@rev2
1474 1476 # \- 2 --- 4 as the merge base
1475 1477 #
1476 1478
1477 1479 cfname = copy[0]
1478 1480 crev = manifest1.get(cfname)
1479 1481 newfparent = fparent2
1480 1482
1481 1483 if manifest2: # branch merge
1482 1484 if fparent2 == nullid or crev is None: # copied on remote side
1483 1485 if cfname in manifest2:
1484 1486 crev = manifest2[cfname]
1485 1487 newfparent = fparent1
1486 1488
1487 1489 # Here, we used to search backwards through history to try to find
1488 1490 # where the file copy came from if the source of a copy was not in
1489 1491 # the parent directory. However, this doesn't actually make sense to
1490 1492 # do (what does a copy from something not in your working copy even
1491 1493 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1492 1494 # the user that copy information was dropped, so if they didn't
1493 1495 # expect this outcome it can be fixed, but this is the correct
1494 1496 # behavior in this circumstance.
1495 1497
1496 1498 if crev:
1497 1499 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1498 1500 meta["copy"] = cfname
1499 1501 meta["copyrev"] = hex(crev)
1500 1502 fparent1, fparent2 = nullid, newfparent
1501 1503 else:
1502 1504 self.ui.warn(_("warning: can't find ancestor for '%s' "
1503 1505 "copied from '%s'!\n") % (fname, cfname))
1504 1506
1505 1507 elif fparent1 == nullid:
1506 1508 fparent1, fparent2 = fparent2, nullid
1507 1509 elif fparent2 != nullid:
1508 1510 # is one parent an ancestor of the other?
1509 1511 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1510 1512 if fparent1 in fparentancestors:
1511 1513 fparent1, fparent2 = fparent2, nullid
1512 1514 elif fparent2 in fparentancestors:
1513 1515 fparent2 = nullid
1514 1516
1515 1517 # is the file changed?
1516 1518 text = fctx.data()
1517 1519 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1518 1520 changelist.append(fname)
1519 1521 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1520 1522 # are just the flags changed during merge?
1521 1523 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1522 1524 changelist.append(fname)
1523 1525
1524 1526 return fparent1
1525 1527
1526 1528 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1527 1529 """check for commit arguments that aren't committable"""
1528 1530 if match.isexact() or match.prefix():
1529 1531 matched = set(status.modified + status.added + status.removed)
1530 1532
1531 1533 for f in match.files():
1532 1534 f = self.dirstate.normalize(f)
1533 1535 if f == '.' or f in matched or f in wctx.substate:
1534 1536 continue
1535 1537 if f in status.deleted:
1536 1538 fail(f, _('file not found!'))
1537 1539 if f in vdirs: # visited directory
1538 1540 d = f + '/'
1539 1541 for mf in matched:
1540 1542 if mf.startswith(d):
1541 1543 break
1542 1544 else:
1543 1545 fail(f, _("no match under directory!"))
1544 1546 elif f not in self.dirstate:
1545 1547 fail(f, _("file not tracked!"))
1546 1548
1547 1549 @unfilteredmethod
1548 1550 def commit(self, text="", user=None, date=None, match=None, force=False,
1549 1551 editor=False, extra=None):
1550 1552 """Add a new revision to current repository.
1551 1553
1552 1554 Revision information is gathered from the working directory,
1553 1555 match can be used to filter the committed files. If editor is
1554 1556 supplied, it is called to get a commit message.
1555 1557 """
1556 1558 if extra is None:
1557 1559 extra = {}
1558 1560
1559 1561 def fail(f, msg):
1560 1562 raise error.Abort('%s: %s' % (f, msg))
1561 1563
1562 1564 if not match:
1563 1565 match = matchmod.always(self.root, '')
1564 1566
1565 1567 if not force:
1566 1568 vdirs = []
1567 1569 match.explicitdir = vdirs.append
1568 1570 match.bad = fail
1569 1571
1570 1572 wlock = lock = tr = None
1571 1573 try:
1572 1574 wlock = self.wlock()
1573 1575 lock = self.lock() # for recent changelog (see issue4368)
1574 1576
1575 1577 wctx = self[None]
1576 1578 merge = len(wctx.parents()) > 1
1577 1579
1578 1580 if not force and merge and match.ispartial():
1579 1581 raise error.Abort(_('cannot partially commit a merge '
1580 1582 '(do not specify files or patterns)'))
1581 1583
1582 1584 status = self.status(match=match, clean=force)
1583 1585 if force:
1584 1586 status.modified.extend(status.clean) # mq may commit clean files
1585 1587
1586 1588 # check subrepos
1587 1589 subs = []
1588 1590 commitsubs = set()
1589 1591 newstate = wctx.substate.copy()
1590 1592 # only manage subrepos and .hgsubstate if .hgsub is present
1591 1593 if '.hgsub' in wctx:
1592 1594 # we'll decide whether to track this ourselves, thanks
1593 1595 for c in status.modified, status.added, status.removed:
1594 1596 if '.hgsubstate' in c:
1595 1597 c.remove('.hgsubstate')
1596 1598
1597 1599 # compare current state to last committed state
1598 1600 # build new substate based on last committed state
1599 1601 oldstate = wctx.p1().substate
1600 1602 for s in sorted(newstate.keys()):
1601 1603 if not match(s):
1602 1604 # ignore working copy, use old state if present
1603 1605 if s in oldstate:
1604 1606 newstate[s] = oldstate[s]
1605 1607 continue
1606 1608 if not force:
1607 1609 raise error.Abort(
1608 1610 _("commit with new subrepo %s excluded") % s)
1609 1611 dirtyreason = wctx.sub(s).dirtyreason(True)
1610 1612 if dirtyreason:
1611 1613 if not self.ui.configbool('ui', 'commitsubrepos'):
1612 1614 raise error.Abort(dirtyreason,
1613 1615 hint=_("use --subrepos for recursive commit"))
1614 1616 subs.append(s)
1615 1617 commitsubs.add(s)
1616 1618 else:
1617 1619 bs = wctx.sub(s).basestate()
1618 1620 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1619 1621 if oldstate.get(s, (None, None, None))[1] != bs:
1620 1622 subs.append(s)
1621 1623
1622 1624 # check for removed subrepos
1623 1625 for p in wctx.parents():
1624 1626 r = [s for s in p.substate if s not in newstate]
1625 1627 subs += [s for s in r if match(s)]
1626 1628 if subs:
1627 1629 if (not match('.hgsub') and
1628 1630 '.hgsub' in (wctx.modified() + wctx.added())):
1629 1631 raise error.Abort(
1630 1632 _("can't commit subrepos without .hgsub"))
1631 1633 status.modified.insert(0, '.hgsubstate')
1632 1634
1633 1635 elif '.hgsub' in status.removed:
1634 1636 # clean up .hgsubstate when .hgsub is removed
1635 1637 if ('.hgsubstate' in wctx and
1636 1638 '.hgsubstate' not in (status.modified + status.added +
1637 1639 status.removed)):
1638 1640 status.removed.insert(0, '.hgsubstate')
1639 1641
1640 1642 # make sure all explicit patterns are matched
1641 1643 if not force:
1642 1644 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1643 1645
1644 1646 cctx = context.workingcommitctx(self, status,
1645 1647 text, user, date, extra)
1646 1648
1647 1649 # internal config: ui.allowemptycommit
1648 1650 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1649 1651 or extra.get('close') or merge or cctx.files()
1650 1652 or self.ui.configbool('ui', 'allowemptycommit'))
1651 1653 if not allowemptycommit:
1652 1654 return None
1653 1655
1654 1656 if merge and cctx.deleted():
1655 1657 raise error.Abort(_("cannot commit merge with missing files"))
1656 1658
1657 1659 ms = mergemod.mergestate.read(self)
1658 1660 mergeutil.checkunresolved(ms)
1659 1661
1660 1662 if editor:
1661 1663 cctx._text = editor(self, cctx, subs)
1662 1664 edited = (text != cctx._text)
1663 1665
1664 1666 # Save commit message in case this transaction gets rolled back
1665 1667 # (e.g. by a pretxncommit hook). Leave the content alone on
1666 1668 # the assumption that the user will use the same editor again.
1667 1669 msgfn = self.savecommitmessage(cctx._text)
1668 1670
1669 1671 # commit subs and write new state
1670 1672 if subs:
1671 1673 for s in sorted(commitsubs):
1672 1674 sub = wctx.sub(s)
1673 1675 self.ui.status(_('committing subrepository %s\n') %
1674 1676 subrepo.subrelpath(sub))
1675 1677 sr = sub.commit(cctx._text, user, date)
1676 1678 newstate[s] = (newstate[s][0], sr)
1677 1679 subrepo.writestate(self, newstate)
1678 1680
1679 1681 p1, p2 = self.dirstate.parents()
1680 1682 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1681 1683 try:
1682 1684 self.hook("precommit", throw=True, parent1=hookp1,
1683 1685 parent2=hookp2)
1684 1686 tr = self.transaction('commit')
1685 1687 ret = self.commitctx(cctx, True)
1686 1688 except: # re-raises
1687 1689 if edited:
1688 1690 self.ui.write(
1689 1691 _('note: commit message saved in %s\n') % msgfn)
1690 1692 raise
1691 1693 # update bookmarks, dirstate and mergestate
1692 1694 bookmarks.update(self, [p1, p2], ret)
1693 1695 cctx.markcommitted(ret)
1694 1696 ms.reset()
1695 1697 tr.close()
1696 1698
1697 1699 finally:
1698 1700 lockmod.release(tr, lock, wlock)
1699 1701
1700 1702 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
 1701 1703             # hack for commands that use a temporary commit (eg: histedit)
 1702 1704             # the temporary commit may have been stripped before the hook runs
1703 1705 if self.changelog.hasnode(ret):
1704 1706 self.hook("commit", node=node, parent1=parent1,
1705 1707 parent2=parent2)
1706 1708 self._afterlock(commithook)
1707 1709 return ret
1708 1710
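For orientation, a minimal external driver of commit() could look like the following sketch (hedged: the repository path and file name are invented, and it assumes the mercurial package is importable).

from mercurial import hg, ui as uimod

u = uimod.ui()
repo = hg.repository(u, '/tmp/demo-repo')      # hypothetical existing repo
with open('/tmp/demo-repo/hello.txt', 'w') as f:
    f.write('hi\n')
repo[None].add(['hello.txt'])                  # stage through the workingctx
node = repo.commit(text='add hello.txt',
                   user='demo <demo@example.com>')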
1709 1711 @unfilteredmethod
1710 1712 def commitctx(self, ctx, error=False):
1711 1713 """Add a new revision to current repository.
1712 1714 Revision information is passed via the context argument.
1713 1715 """
1714 1716
1715 1717 tr = None
1716 1718 p1, p2 = ctx.p1(), ctx.p2()
1717 1719 user = ctx.user()
1718 1720
1719 1721 lock = self.lock()
1720 1722 try:
1721 1723 tr = self.transaction("commit")
1722 1724 trp = weakref.proxy(tr)
1723 1725
1724 1726 if ctx.manifestnode():
1725 1727 # reuse an existing manifest revision
1726 1728 mn = ctx.manifestnode()
1727 1729 files = ctx.files()
1728 1730 elif ctx.files():
1729 1731 m1ctx = p1.manifestctx()
1730 1732 m2ctx = p2.manifestctx()
1731 1733 mctx = m1ctx.copy()
1732 1734
1733 1735 m = mctx.read()
1734 1736 m1 = m1ctx.read()
1735 1737 m2 = m2ctx.read()
1736 1738
1737 1739 # check in files
1738 1740 added = []
1739 1741 changed = []
1740 1742 removed = list(ctx.removed())
1741 1743 linkrev = len(self)
1742 1744 self.ui.note(_("committing files:\n"))
1743 1745 for f in sorted(ctx.modified() + ctx.added()):
1744 1746 self.ui.note(f + "\n")
1745 1747 try:
1746 1748 fctx = ctx[f]
1747 1749 if fctx is None:
1748 1750 removed.append(f)
1749 1751 else:
1750 1752 added.append(f)
1751 1753 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1752 1754 trp, changed)
1753 1755 m.setflag(f, fctx.flags())
1754 1756 except OSError as inst:
1755 1757 self.ui.warn(_("trouble committing %s!\n") % f)
1756 1758 raise
1757 1759 except IOError as inst:
1758 1760 errcode = getattr(inst, 'errno', errno.ENOENT)
1759 1761 if error or errcode and errcode != errno.ENOENT:
1760 1762 self.ui.warn(_("trouble committing %s!\n") % f)
1761 1763 raise
1762 1764
1763 1765 # update manifest
1764 1766 self.ui.note(_("committing manifest\n"))
1765 1767 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1766 1768 drop = [f for f in removed if f in m]
1767 1769 for f in drop:
1768 1770 del m[f]
1769 1771 mn = mctx.write(trp, linkrev,
1770 1772 p1.manifestnode(), p2.manifestnode(),
1771 1773 added, drop)
1772 1774 files = changed + removed
1773 1775 else:
1774 1776 mn = p1.manifestnode()
1775 1777 files = []
1776 1778
1777 1779 # update changelog
1778 1780 self.ui.note(_("committing changelog\n"))
1779 1781 self.changelog.delayupdate(tr)
1780 1782 n = self.changelog.add(mn, files, ctx.description(),
1781 1783 trp, p1.node(), p2.node(),
1782 1784 user, ctx.date(), ctx.extra().copy())
1783 1785 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1784 1786 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1785 1787 parent2=xp2)
 1786 1788             # set the new commit in its proper phase
1787 1789 targetphase = subrepo.newcommitphase(self.ui, ctx)
1788 1790 if targetphase:
 1789 1791                 # retracting the boundary does not alter parent changesets.
 1790 1792                 # if a parent has a higher phase, the resulting phase will
 1791 1793                 # be compliant anyway
1792 1794 #
1793 1795 # if minimal phase was 0 we don't need to retract anything
1794 1796 phases.retractboundary(self, tr, targetphase, [n])
1795 1797 tr.close()
1796 1798 branchmap.updatecache(self.filtered('served'))
1797 1799 return n
1798 1800 finally:
1799 1801 if tr:
1800 1802 tr.release()
1801 1803 lock.release()
1802 1804
1803 1805 @unfilteredmethod
1804 1806 def destroying(self):
1805 1807 '''Inform the repository that nodes are about to be destroyed.
1806 1808 Intended for use by strip and rollback, so there's a common
1807 1809 place for anything that has to be done before destroying history.
1808 1810
1809 1811 This is mostly useful for saving state that is in memory and waiting
1810 1812 to be flushed when the current lock is released. Because a call to
 1811 1813         destroyed is imminent, the repo will be invalidated, causing those
 1812 1814         changes either to stay in memory (waiting for the next unlock) or to
 1813 1815         vanish completely.
1814 1816 '''
1815 1817 # When using the same lock to commit and strip, the phasecache is left
1816 1818 # dirty after committing. Then when we strip, the repo is invalidated,
1817 1819 # causing those changes to disappear.
1818 1820 if '_phasecache' in vars(self):
1819 1821 self._phasecache.write()
1820 1822
1821 1823 @unfilteredmethod
1822 1824 def destroyed(self):
1823 1825 '''Inform the repository that nodes have been destroyed.
1824 1826 Intended for use by strip and rollback, so there's a common
1825 1827 place for anything that has to be done after destroying history.
1826 1828 '''
1827 1829 # When one tries to:
1828 1830 # 1) destroy nodes thus calling this method (e.g. strip)
1829 1831 # 2) use phasecache somewhere (e.g. commit)
1830 1832 #
1831 1833 # then 2) will fail because the phasecache contains nodes that were
1832 1834 # removed. We can either remove phasecache from the filecache,
1833 1835 # causing it to reload next time it is accessed, or simply filter
1834 1836 # the removed nodes now and write the updated cache.
1835 1837 self._phasecache.filterunknown(self)
1836 1838 self._phasecache.write()
1837 1839
 1838 1840         # update the 'served' branch cache to help read-only server processes
 1839 1841         # Thanks to branchcache collaboration, this is done from the nearest
 1840 1842         # filtered subset and it is expected to be fast.
1841 1843 branchmap.updatecache(self.filtered('served'))
1842 1844
1843 1845 # Ensure the persistent tag cache is updated. Doing it now
1844 1846 # means that the tag cache only has to worry about destroyed
1845 1847 # heads immediately after a strip/rollback. That in turn
1846 1848 # guarantees that "cachetip == currenttip" (comparing both rev
1847 1849 # and node) always means no nodes have been added or destroyed.
1848 1850
1849 1851 # XXX this is suboptimal when qrefresh'ing: we strip the current
1850 1852 # head, refresh the tag cache, then immediately add a new head.
1851 1853 # But I think doing it this way is necessary for the "instant
1852 1854 # tag cache retrieval" case to work.
1853 1855 self.invalidate()
1854 1856
1855 1857 def walk(self, match, node=None):
1856 1858 '''
1857 1859 walk recursively through the directory tree or a given
1858 1860 changeset, finding all files matched by the match
1859 1861 function
1860 1862 '''
1861 1863 return self[node].walk(match)
1862 1864
1863 1865 def status(self, node1='.', node2=None, match=None,
1864 1866 ignored=False, clean=False, unknown=False,
1865 1867 listsubrepos=False):
1866 1868 '''a convenience method that calls node1.status(node2)'''
1867 1869 return self[node1].status(node2, match, ignored, clean, unknown,
1868 1870 listsubrepos)
1869 1871
1870 1872 def heads(self, start=None):
1871 1873 if start is None:
1872 1874 cl = self.changelog
1873 1875 headrevs = reversed(cl.headrevs())
1874 1876 return [cl.node(rev) for rev in headrevs]
1875 1877
1876 1878 heads = self.changelog.heads(start)
1877 1879 # sort the output in rev descending order
1878 1880 return sorted(heads, key=self.changelog.rev, reverse=True)
1879 1881
1880 1882 def branchheads(self, branch=None, start=None, closed=False):
1881 1883 '''return a (possibly filtered) list of heads for the given branch
1882 1884
1883 1885 Heads are returned in topological order, from newest to oldest.
1884 1886 If branch is None, use the dirstate branch.
1885 1887 If start is not None, return only heads reachable from start.
1886 1888 If closed is True, return heads that are marked as closed as well.
1887 1889 '''
1888 1890 if branch is None:
1889 1891 branch = self[None].branch()
1890 1892 branches = self.branchmap()
1891 1893 if branch not in branches:
1892 1894 return []
1893 1895 # the cache returns heads ordered lowest to highest
1894 1896 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1895 1897 if start is not None:
1896 1898 # filter out the heads that cannot be reached from startrev
1897 1899 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1898 1900 bheads = [h for h in bheads if h in fbheads]
1899 1901 return bheads
1900 1902
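A hedged usage sketch (branch name invented): heads come back newest first, and closed heads are included only on request.

from mercurial.node import short

for head in repo.branchheads('default', closed=True):
    repo.ui.write('%s\n' % short(head))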
1901 1903 def branches(self, nodes):
1902 1904 if not nodes:
1903 1905 nodes = [self.changelog.tip()]
1904 1906 b = []
1905 1907 for n in nodes:
1906 1908 t = n
1907 1909 while True:
1908 1910 p = self.changelog.parents(n)
1909 1911 if p[1] != nullid or p[0] == nullid:
1910 1912 b.append((t, n, p[0], p[1]))
1911 1913 break
1912 1914 n = p[0]
1913 1915 return b
1914 1916
1915 1917 def between(self, pairs):
1916 1918 r = []
1917 1919
1918 1920 for top, bottom in pairs:
1919 1921 n, l, i = top, [], 0
1920 1922 f = 1
1921 1923
1922 1924 while n != bottom and n != nullid:
1923 1925 p = self.changelog.parents(n)[0]
1924 1926 if i == f:
1925 1927 l.append(n)
1926 1928 f = f * 2
1927 1929 n = p
1928 1930 i += 1
1929 1931
1930 1932 r.append(l)
1931 1933
1932 1934 return r
1933 1935
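The loop above samples first-parent ancestors at exponentially growing distances (1, 2, 4, 8, ... steps below top), which is what the legacy discovery protocol expects. A standalone sketch of the same sampling, using plain integers as stand-ins for nodes:

# 'parent' plays the role of self.changelog.parents(n)[0]
def sample_between(top, bottom, parent):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        if i == f:
            l.append(n)
            f *= 2
        n = parent(n)
        i += 1
    return l

# on a linear chain 10 -> 9 -> ... -> 0, the samples sit 1, 2, 4 and 8
# steps below the top
assert sample_between(10, 0, lambda n: n - 1) == [9, 8, 6, 2]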
1934 1936 def checkpush(self, pushop):
1935 1937 """Extensions can override this function if additional checks have
 1936 1938         to be performed before pushing, or call it if they override the push
 1937 1939         command.
1938 1940 """
1939 1941 pass
1940 1942
1941 1943 @unfilteredpropertycache
1942 1944 def prepushoutgoinghooks(self):
1943 1945 """Return util.hooks consists of a pushop with repo, remote, outgoing
1944 1946 methods, which are called before pushing changesets.
1945 1947 """
1946 1948 return util.hooks()
1947 1949
1948 1950 def pushkey(self, namespace, key, old, new):
1949 1951 try:
1950 1952 tr = self.currenttransaction()
1951 1953 hookargs = {}
1952 1954 if tr is not None:
1953 1955 hookargs.update(tr.hookargs)
1954 1956 hookargs['namespace'] = namespace
1955 1957 hookargs['key'] = key
1956 1958 hookargs['old'] = old
1957 1959 hookargs['new'] = new
1958 1960 self.hook('prepushkey', throw=True, **hookargs)
1959 1961 except error.HookAbort as exc:
1960 1962 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1961 1963 if exc.hint:
1962 1964 self.ui.write_err(_("(%s)\n") % exc.hint)
1963 1965 return False
1964 1966 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1965 1967 ret = pushkey.push(self, namespace, key, old, new)
1966 1968 def runhook():
1967 1969 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1968 1970 ret=ret)
1969 1971 self._afterlock(runhook)
1970 1972 return ret
1971 1973
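As an illustration, the built-in 'bookmarks' namespace moves a bookmark through this method; old and new values are hex node strings, with the empty string standing for 'unset'. A hedged sketch with invented names:

newhex = repo['tip'].hex()     # placeholder target node
ok = repo.pushkey('bookmarks', 'feature', '', newhex)
if not ok:
    repo.ui.warn('bookmark push was rejected\n')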
1972 1974 def listkeys(self, namespace):
1973 1975 self.hook('prelistkeys', throw=True, namespace=namespace)
1974 1976 self.ui.debug('listing keys for "%s"\n' % namespace)
1975 1977 values = pushkey.list(self, namespace)
1976 1978 self.hook('listkeys', namespace=namespace, values=values)
1977 1979 return values
1978 1980
1979 1981 def debugwireargs(self, one, two, three=None, four=None, five=None):
1980 1982 '''used to test argument passing over the wire'''
1981 1983 return "%s %s %s %s %s" % (one, two, three, four, five)
1982 1984
1983 1985 def savecommitmessage(self, text):
1984 1986 fp = self.vfs('last-message.txt', 'wb')
1985 1987 try:
1986 1988 fp.write(text)
1987 1989 finally:
1988 1990 fp.close()
1989 1991 return self.pathto(fp.name[len(self.root) + 1:])
1990 1992
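A hedged sketch of the round trip: the message lands in .hg/last-message.txt and the returned path is relative to the current directory.

relpath = repo.savecommitmessage('WIP: draft message\n')
repo.ui.status('commit message saved in %s\n' % relpath)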
1991 1993 # used to avoid circular references so destructors work
1992 1994 def aftertrans(files):
1993 1995 renamefiles = [tuple(t) for t in files]
1994 1996 def a():
1995 1997 for vfs, src, dest in renamefiles:
1996 1998 try:
1997 1999 vfs.rename(src, dest)
1998 2000 except OSError: # journal file does not yet exist
1999 2001 pass
2000 2002 return a
2001 2003
2002 2004 def undoname(fn):
2003 2005 base, name = os.path.split(fn)
2004 2006 assert name.startswith('journal')
2005 2007 return os.path.join(base, name.replace('journal', 'undo', 1))
2006 2008
2007 2009 def instance(ui, path, create):
2008 2010 return localrepository(ui, util.urllocalpath(path), create)
2009 2011
2010 2012 def islocal(path):
2011 2013 return True
2012 2014
2013 2015 def newreporequirements(repo):
2014 2016 """Determine the set of requirements for a new local repository.
2015 2017
2016 2018 Extensions can wrap this function to specify custom requirements for
2017 2019 new repositories.
2018 2020 """
2019 2021 ui = repo.ui
2020 2022 requirements = set(['revlogv1'])
2021 2023 if ui.configbool('format', 'usestore', True):
2022 2024 requirements.add('store')
2023 2025 if ui.configbool('format', 'usefncache', True):
2024 2026 requirements.add('fncache')
2025 2027 if ui.configbool('format', 'dotencode', True):
2026 2028 requirements.add('dotencode')
2027 2029
2028 2030 compengine = ui.config('experimental', 'format.compression', 'zlib')
2029 2031 if compengine not in util.compengines:
2030 2032 raise error.Abort(_('compression engine %s defined by '
2031 2033 'experimental.format.compression not available') %
2032 2034 compengine,
2033 2035 hint=_('run "hg debuginstall" to list available '
2034 2036 'compression engines'))
2035 2037
2036 2038 # zlib is the historical default and doesn't need an explicit requirement.
2037 2039 if compengine != 'zlib':
2038 2040 requirements.add('exp-compression-%s' % compengine)
2039 2041
2040 2042 if scmutil.gdinitconfig(ui):
2041 2043 requirements.add('generaldelta')
2042 2044 if ui.configbool('experimental', 'treemanifest', False):
2043 2045 requirements.add('treemanifest')
2044 2046 if ui.configbool('experimental', 'manifestv2', False):
2045 2047 requirements.add('manifestv2')
2046 2048
2047 2049 return requirements
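Since the docstring invites wrapping, here is a hedged sketch of an extension doing so ('myext' and the requirement name are invented):

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'newrequirement', False):
        requirements.add('exp-myext-requirement')
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)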
@@ -1,388 +1,389 b''
1 1 $ cat <<EOF >> $HGRCPATH
2 2 > [ui]
3 3 > color = always
4 4 > [color]
5 5 > mode = ansi
6 6 > EOF
7 7 Terminfo codes compatibility fix
8 8 $ echo "color.none=0" >> $HGRCPATH
9 9
10 10 $ hg init repo1
11 11 $ cd repo1
12 12 $ mkdir a b a/1 b/1 b/2
13 13 $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2
14 14
15 15 hg status in repo root:
16 16
17 17 $ hg status
18 18 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
19 19 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
20 20 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
21 21 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
22 22 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
23 23 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
24 24
25 25 $ hg status --color=debug
26 26 [status.unknown|? ][status.unknown|a/1/in_a_1]
27 27 [status.unknown|? ][status.unknown|a/in_a]
28 28 [status.unknown|? ][status.unknown|b/1/in_b_1]
29 29 [status.unknown|? ][status.unknown|b/2/in_b_2]
30 30 [status.unknown|? ][status.unknown|b/in_b]
31 31 [status.unknown|? ][status.unknown|in_root]
32 32
33 33 hg status with template
34 34 $ hg status -T "{label('red', path)}\n" --color=debug
35 35 [red|a/1/in_a_1]
36 36 [red|a/in_a]
37 37 [red|b/1/in_b_1]
38 38 [red|b/2/in_b_2]
39 39 [red|b/in_b]
40 40 [red|in_root]
41 41
42 42 hg status . in repo root:
43 43
44 44 $ hg status .
45 45 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
46 46 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
47 47 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
48 48 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
49 49 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
50 50 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
51 51
52 52 $ hg status --cwd a
53 53 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
54 54 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
55 55 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
56 56 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
57 57 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
58 58 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
59 59 $ hg status --cwd a .
60 60 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
61 61 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
62 62 $ hg status --cwd a ..
63 63 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
64 64 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
65 65 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/1/in_b_1\x1b[0m (esc)
66 66 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/2/in_b_2\x1b[0m (esc)
67 67 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/in_b\x1b[0m (esc)
68 68 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
69 69
70 70 $ hg status --cwd b
71 71 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
72 72 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
73 73 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
74 74 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
75 75 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
76 76 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
77 77 $ hg status --cwd b .
78 78 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
79 79 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
80 80 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
81 81 $ hg status --cwd b ..
82 82 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/1/in_a_1\x1b[0m (esc)
83 83 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/in_a\x1b[0m (esc)
84 84 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
85 85 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
86 86 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
87 87 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)
88 88
89 89 $ hg status --cwd a/1
90 90 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
91 91 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
92 92 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
93 93 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
94 94 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
95 95 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
96 96 $ hg status --cwd a/1 .
97 97 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
98 98 $ hg status --cwd a/1 ..
99 99 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
100 100 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_a\x1b[0m (esc)
101 101
102 102 $ hg status --cwd b/1
103 103 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
104 104 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
105 105 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
106 106 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
107 107 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
108 108 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
109 109 $ hg status --cwd b/1 .
110 110 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
111 111 $ hg status --cwd b/1 ..
112 112 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
113 113 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../2/in_b_2\x1b[0m (esc)
114 114 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
115 115
116 116 $ hg status --cwd b/2
117 117 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
118 118 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
119 119 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
120 120 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
121 121 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
122 122 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
123 123 $ hg status --cwd b/2 .
124 124 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
125 125 $ hg status --cwd b/2 ..
126 126 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../1/in_b_1\x1b[0m (esc)
127 127 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
128 128 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)
129 129
130 130 Make sure --color=never works
131 131 $ hg status --color=never
132 132 ? a/1/in_a_1
133 133 ? a/in_a
134 134 ? b/1/in_b_1
135 135 ? b/2/in_b_2
136 136 ? b/in_b
137 137 ? in_root
138 138
139 139 Make sure ui.formatted=False works
140 140 $ hg status --color=auto --config ui.formatted=False
141 141 ? a/1/in_a_1
142 142 ? a/in_a
143 143 ? b/1/in_b_1
144 144 ? b/2/in_b_2
145 145 ? b/in_b
146 146 ? in_root
147 147
148 148 $ cd ..
149 149
150 150 $ hg init repo2
151 151 $ cd repo2
152 152 $ touch modified removed deleted ignored
153 153 $ echo "^ignored$" > .hgignore
154 154 $ hg ci -A -m 'initial checkin'
155 155 adding .hgignore
156 156 adding deleted
157 157 adding modified
158 158 adding removed
159 159 $ hg log --color=debug
160 160 [log.changeset changeset.draft|changeset: 0:389aef86a55e]
161 161 [log.tag|tag: tip]
162 162 [log.user|user: test]
163 163 [log.date|date: Thu Jan 01 00:00:00 1970 +0000]
164 164 [log.summary|summary: initial checkin]
165 165
166 166 $ hg log -Tcompact --color=debug
167 167 [log.changeset changeset.draft|0][tip] [log.node|389aef86a55e] [log.date|1970-01-01 00:00 +0000] [log.user|test]
168 168 [ui.note log.description|initial checkin]
169 169
170 170 Labels on empty strings should not be displayed, labels on custom
171 171 templates should be.
172 172
173 173 $ hg log --color=debug -T '{label("my.label",author)}\n{label("skipped.label","")}'
174 174 [my.label|test]
175 175 $ touch modified added unknown ignored
176 176 $ hg add added
177 177 $ hg remove removed
178 178 $ rm deleted
179 179
180 180 hg status:
181 181
182 182 $ hg status
183 183 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
184 184 \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
185 185 \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
186 186 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
187 187
188 188 hg status modified added removed deleted unknown never-existed ignored:
189 189
190 190 $ hg status modified added removed deleted unknown never-existed ignored
191 191 never-existed: * (glob)
192 192 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
193 193 \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
194 194 \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
195 195 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
196 196
197 197 $ hg copy modified copied
198 198
199 199 hg status -C:
200 200
201 201 $ hg status -C
202 202 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
203 203 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
204 204 \x1b[0;0m modified\x1b[0m (esc)
205 205 \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
206 206 \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
207 207 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
208 208
209 209 hg status -A:
210 210
211 211 $ hg status -A
212 212 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
213 213 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
214 214 \x1b[0;0m modified\x1b[0m (esc)
215 215 \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
216 216 \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
217 217 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
218 218 \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignored\x1b[0m (esc)
219 219 \x1b[0;0mC \x1b[0m\x1b[0;0m.hgignore\x1b[0m (esc)
220 220 \x1b[0;0mC \x1b[0m\x1b[0;0mmodified\x1b[0m (esc)
221 221
222 222
223 223 hg status -A (with terminfo color):
224 224
225 225 #if tic
226 226
227 227 $ mkdir "$TESTTMP/terminfo"
228 228 $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
229 229 $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo -A
230 230 \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc)
231 231 \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc)
232 232 \x1b[30m\x1b[30m modified\x1b[30m (esc)
233 233 \x1b[30m\x1b[31m\x1b[1mR \x1b[30m\x1b[30m\x1b[31m\x1b[1mremoved\x1b[30m (esc)
234 234 \x1b[30m\x1b[36m\x1b[1m\x1b[4m! \x1b[30m\x1b[30m\x1b[36m\x1b[1m\x1b[4mdeleted\x1b[30m (esc)
235 235 \x1b[30m\x1b[35m\x1b[1m\x1b[4m? \x1b[30m\x1b[30m\x1b[35m\x1b[1m\x1b[4munknown\x1b[30m (esc)
236 236 \x1b[30m\x1b[30m\x1b[1mI \x1b[30m\x1b[30m\x1b[30m\x1b[1mignored\x1b[30m (esc)
237 237 \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30m.hgignore\x1b[30m (esc)
238 238 \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30mmodified\x1b[30m (esc)
239 239
240 240 The user can define effects with raw terminfo codes:
241 241
242 242 $ cat <<EOF >> $HGRCPATH
243 243 > # Completely bogus code for dim
244 244 > terminfo.dim = \E[88m
245 245 > # We can override what's in the terminfo database, too
246 246 > terminfo.bold = \E[2m
247 247 > EOF
248 248 $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim -A
249 249 \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2madded\x1b[30m (esc)
250 250 \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2mcopied\x1b[30m (esc)
251 251 \x1b[30m\x1b[30m modified\x1b[30m (esc)
252 252 \x1b[30m\x1b[31m\x1b[2mR \x1b[30m\x1b[30m\x1b[31m\x1b[2mremoved\x1b[30m (esc)
253 253 \x1b[30m\x1b[36m\x1b[2m\x1b[4m! \x1b[30m\x1b[30m\x1b[36m\x1b[2m\x1b[4mdeleted\x1b[30m (esc)
254 254 \x1b[30m\x1b[35m\x1b[2m\x1b[4m? \x1b[30m\x1b[30m\x1b[35m\x1b[2m\x1b[4munknown\x1b[30m (esc)
255 255 \x1b[30m\x1b[30m\x1b[2mI \x1b[30m\x1b[30m\x1b[30m\x1b[2mignored\x1b[30m (esc)
256 256 \x1b[30m\x1b[88mC \x1b[30m\x1b[30m\x1b[88m.hgignore\x1b[30m (esc)
257 257 \x1b[30m\x1b[88mC \x1b[30m\x1b[30m\x1b[88mmodified\x1b[30m (esc)
258 258
259 259 #endif
260 260
261 261
262 262 $ echo "^ignoreddir$" > .hgignore
263 263 $ mkdir ignoreddir
264 264 $ touch ignoreddir/file
265 265
266 266 hg status ignoreddir/file:
267 267
268 268 $ hg status ignoreddir/file
269 269
270 270 hg status -i ignoreddir/file:
271 271
272 272 $ hg status -i ignoreddir/file
273 273 \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignoreddir/file\x1b[0m (esc)
274 274 $ cd ..
275 275
276 276 check 'status -q' and some combinations
277 277
278 278 $ hg init repo3
279 279 $ cd repo3
280 280 $ touch modified removed deleted ignored
281 281 $ echo "^ignored$" > .hgignore
282 282 $ hg commit -A -m 'initial checkin'
283 283 adding .hgignore
284 284 adding deleted
285 285 adding modified
286 286 adding removed
287 287 $ touch added unknown ignored
288 288 $ hg add added
289 289 $ echo "test" >> modified
290 290 $ hg remove removed
291 291 $ rm deleted
292 292 $ hg copy modified copied
293 293
294 294 test unknown color
295 295
296 296 $ hg --config color.status.modified=periwinkle status
297 297 ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
298 298 ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
299 ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
299 300 M modified
300 301 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
301 302 \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
302 303 \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
303 304 \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
304 305 \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
305 306
 306 307 Run status with 2 different flags.
 307 308 Check whether the results are the same or different.
 308 309 If the result is not as expected, report an error.
309 310
310 311 $ assert() {
311 312 > hg status $1 > ../a
312 313 > hg status $2 > ../b
313 314 > if diff ../a ../b > /dev/null; then
314 315 > out=0
315 316 > else
316 317 > out=1
317 318 > fi
318 319 > if [ $3 -eq 0 ]; then
319 320 > df="same"
320 321 > else
321 322 > df="different"
322 323 > fi
323 324 > if [ $out -ne $3 ]; then
324 325 > echo "Error on $1 and $2, should be $df."
325 326 > fi
326 327 > }
327 328
328 329 assert flag1 flag2 [0-same | 1-different]
329 330
330 331 $ assert "-q" "-mard" 0
331 332 $ assert "-A" "-marduicC" 0
332 333 $ assert "-qA" "-mardcC" 0
333 334 $ assert "-qAui" "-A" 0
334 335 $ assert "-qAu" "-marducC" 0
335 336 $ assert "-qAi" "-mardicC" 0
336 337 $ assert "-qu" "-u" 0
337 338 $ assert "-q" "-u" 1
338 339 $ assert "-m" "-a" 1
339 340 $ assert "-r" "-d" 1
340 341 $ cd ..
341 342
342 343 test 'resolve -l'
343 344
344 345 $ hg init repo4
345 346 $ cd repo4
346 347 $ echo "file a" > a
347 348 $ echo "file b" > b
348 349 $ hg add a b
349 350 $ hg commit -m "initial"
350 351 $ echo "file a change 1" > a
351 352 $ echo "file b change 1" > b
352 353 $ hg commit -m "head 1"
353 354 $ hg update 0
354 355 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
355 356 $ echo "file a change 2" > a
356 357 $ echo "file b change 2" > b
357 358 $ hg commit -m "head 2"
358 359 created new head
359 360 $ hg merge
360 361 merging a
361 362 merging b
362 363 warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
363 364 warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
364 365 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
365 366 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
366 367 [1]
367 368 $ hg resolve -m b
368 369
369 370 hg resolve with one unresolved, one resolved:
370 371
371 372 $ hg resolve -l
372 373 \x1b[0;31;1mU \x1b[0m\x1b[0;31;1ma\x1b[0m (esc)
373 374 \x1b[0;32;1mR \x1b[0m\x1b[0;32;1mb\x1b[0m (esc)
374 375
375 376 color coding of error message with current availability of curses
376 377
377 378 $ hg unknowncommand > /dev/null
378 379 hg: unknown command 'unknowncommand'
379 380 [255]
380 381
381 382 color coding of error message without curses
382 383
383 384 $ echo 'raise ImportError' > curses.py
384 385 $ PYTHONPATH=`pwd`:$PYTHONPATH hg unknowncommand > /dev/null
385 386 hg: unknown command 'unknowncommand'
386 387 [255]
387 388
388 389 $ cd ..