##// END OF EJS Templates
devel-warn: move the develwarn function as a method of the ui object...
Pierre-Yves David -
r25629:52e5f68d default
parent child Browse files
Show More
@@ -1,1943 +1,1943 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
# Convenience aliases: these cache helpers are used throughout this module.
propertycache = util.propertycache
filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """A filecache descriptor that always works on the unfiltered repo.

    All filecache usage on repo are done for logic that should be
    unfiltered, so every descriptor operation first redirects to
    ``repo.unfiltered()`` before delegating to the base class.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Store files live under .hg/store, so resolve names with sjoin
        # rather than the plain .hg join.
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Already unfiltered: compute and cache the value here.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Filtered view: fetch the attribute cached on the unfiltered
        # repo so every view shares a single value.
        return getattr(unfi, self.name)

class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store directly on the instance, bypassing any __setattr__
        # override; each filtered view keeps its own cached value.
        object.__setattr__(obj, self.name, value)
55 55
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # The cached value, if any, lives in the instance dict of the
    # unfiltered repository object.
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def inner(repo, *args, **kwargs):
        # Swap the (possibly filtered) receiver for its unfiltered twin
        # before invoking the wrapped method.
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
66 66
# Capabilities a modern local peer advertises.
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
# Legacy peers additionally understand changegroupsubset.
legacycaps = moderncaps.union(['changegroupsubset'])
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # Peers only ever see the 'served' view of the repository (no
        # secret or hidden changesets).
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # A local peer can hand out its underlying repository object.
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # Re-raise the original failure after flushing the
                # salvaged output.
                raise
        except error.PushRaced, exc:
            # Translate a push race into the error the wire protocol
            # reports back to the client.
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        # pre-bundle2 changegroup retrieval
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
class localrepository(object):

    # Requirements that affect the on-disk store format; shared with peers
    # via the 'supportedformats' attribute.
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # Everything this client can read, including non-store requirements.
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements forwarded to the store opener as revlog options.
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the repoview filter in effect; None means unfiltered.
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # Requirements every newly created repository starts with.
        return ['revlogv1']
208 208
    def __init__(self, baseui, path=None, create=False):
        """Open (or, when create=True, initialize) the repository at path.

        Raises error.RepoError when the repository is missing
        (create=False) or already exists (create=True).
        """
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # A missing .hg/hgrc is perfectly fine.
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # Only setup functions registered by currently enabled
            # extensions get to extend the supported set.
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError, inst:
                # Repos predating the requires file simply have none.
                if inst.errno != errno.ENOENT:
                    raise

        # Resolve shared repositories: .hg/sharedpath points at the .hg
        # directory actually holding the store.
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
327 327
    def close(self):
        """Release the repository, flushing write-back caches."""
        self._writecaches()

    def _writecaches(self):
        # Only the rev-branch cache needs explicit write-back for now.
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        # Advertise bundle2 support (with our urlquoted capability blob)
        # unless the experimental knob turns advertising off.
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 346 if chunkcachesize is not None:
347 347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 349 if maxchainlen is not None:
350 350 self.svfs.options['maxchainlen'] = maxchainlen
351 351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 352 if manifestcachesize is not None:
353 353 self.svfs.options['manifestcachesize'] = manifestcachesize
354 354
    def _writerequirements(self):
        # Persist self.requirements to .hg/requires.
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # Deeper than a direct subrepo: delegate to that
                    # subrepo's own nesting rules.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _activebookmark(self):
        return bookmarks.readactive(self)

    def bookmarkheads(self, bookmark):
        # Heads of a "bookmark family": every bookmark sharing the name
        # before the '@' divergence marker.
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)
432 432
    @storecache('obsstore')
    def obsstore(self):
        """The obsolescence-marker store for this repository."""
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # Markers may only be created when the feature is enabled;
        # otherwise open the store read-only.
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        """The changelog revlog, honoring pending transaction data.

        When HG_PENDING points inside this repo, a hook is running for an
        open transaction, so the pending changelog data is read too.
        """
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        """The manifest revlog."""
        return manifest.manifest(self.svfs)

    def dirlog(self, dir):
        # Per-directory manifest revlog (tree manifests).
        return self.manifest.dirlog(dir)

    @repofilecache('dirstate')
    def dirstate(self):
        """The working-directory state, validating its recorded parents."""
        # Mutable cell so the closure can flip it (no 'nonlocal' in py2).
        warned = [0]
        def validate(node):
            # Map a parent unknown to the changelog to nullid, warning
            # only once per dirstate instance.
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        """repo[changeid] -> changectx; None -> workingctx; slice -> list."""
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # Slices skip revisions hidden by the current filter.
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # True when changeid resolves to a changeset in this (filtered) view.
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        # Local repositories are addressed with the 'file:' scheme.
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
532 532
533 533 @unfilteredmethod
534 534 def _tag(self, names, node, message, local, user, date, extra={},
535 535 editor=False):
536 536 if isinstance(names, str):
537 537 names = (names,)
538 538
539 539 branches = self.branchmap()
540 540 for name in names:
541 541 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 542 local=local)
543 543 if name in branches:
544 544 self.ui.warn(_("warning: tag %s conflicts with existing"
545 545 " branch name\n") % name)
546 546
547 547 def writetags(fp, names, munge, prevtags):
548 548 fp.seek(0, 2)
549 549 if prevtags and prevtags[-1] != '\n':
550 550 fp.write('\n')
551 551 for name in names:
552 552 if munge:
553 553 m = munge(name)
554 554 else:
555 555 m = name
556 556
557 557 if (self._tagscache.tagtypes and
558 558 name in self._tagscache.tagtypes):
559 559 old = self.tags().get(name, nullid)
560 560 fp.write('%s %s\n' % (hex(old), m))
561 561 fp.write('%s %s\n' % (hex(node), m))
562 562 fp.close()
563 563
564 564 prevtags = ''
565 565 if local:
566 566 try:
567 567 fp = self.vfs('localtags', 'r+')
568 568 except IOError:
569 569 fp = self.vfs('localtags', 'a')
570 570 else:
571 571 prevtags = fp.read()
572 572
573 573 # local tags are stored in the current charset
574 574 writetags(fp, names, None, prevtags)
575 575 for name in names:
576 576 self.hook('tag', node=hex(node), tag=name, local=local)
577 577 return
578 578
579 579 try:
580 580 fp = self.wfile('.hgtags', 'rb+')
581 581 except IOError, e:
582 582 if e.errno != errno.ENOENT:
583 583 raise
584 584 fp = self.wfile('.hgtags', 'ab')
585 585 else:
586 586 prevtags = fp.read()
587 587
588 588 # committed tags are stored in UTF-8
589 589 writetags(fp, names, encoding.fromlocal, prevtags)
590 590
591 591 fp.close()
592 592
593 593 self.invalidatecaches()
594 594
595 595 if '.hgtags' not in self.dirstate:
596 596 self[None].add(['.hgtags'])
597 597
598 598 m = matchmod.exact(self.root, '', ['.hgtags'])
599 599 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 600 editor=editor)
601 601
602 602 for name in names:
603 603 self.hook('tag', node=hex(node), tag=name, local=local)
604 604
605 605 return tagnode
606 606
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            # Refuse to run with a dirty .hgtags: the commit below would
            # mix the user's pending edits with the new tag.
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Lazily-populated derived caches; see tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # Some revisions are filtered: recompute rather than trusting
            # a cache built for a different visibility set.
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
729 729
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # Build the reverse mapping (node -> sorted tag names) once.
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        # All bookmark names pointing at the given node, sorted.
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev->branch cache, shared by all repo views.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
759 759
760 760 def branchtip(self, branch, ignoremissing=False):
761 761 '''return the tip node for a given branch
762 762
763 763 If ignoremissing is True, then this method will not raise an error.
764 764 This is helpful for callers that only expect None for a missing branch
765 765 (e.g. namespace).
766 766
767 767 '''
768 768 try:
769 769 return self.branchmap().branchtip(branch)
770 770 except KeyError:
771 771 if not ignoremissing:
772 772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 773 else:
774 774 pass
775 775
    def lookup(self, key):
        # Resolve any changeset identifier (rev, node, tag, ...) to a node.
        return self[key].node()
778 778
779 779 def lookupbranch(self, key, remote=None):
780 780 repo = remote or self
781 781 if key in repo.branchmap():
782 782 return key
783 783
784 784 repo = (remote and remote.local()) and remote or self
785 785 return repo[key].branch()
786 786
787 787 def known(self, nodes):
788 788 nm = self.changelog.nodemap
789 789 pc = self._phasecache
790 790 result = []
791 791 for n in nodes:
792 792 r = nm.get(n)
793 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 794 result.append(resp)
795 795 return result
796 796
    def local(self):
        # This *is* a local repository; peers return their repo here too.
        return self

    def publishing(self):
        """True if this repository publishes changesets on exchange."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        """True if this repository may be cloned by direct file copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
813 813
814 814 def shared(self):
815 815 '''the type of shared repository (None if not shared)'''
816 816 if self.sharedpath != self.path:
817 817 return 'store'
818 818 return None
819 819
    def join(self, f, *insidef):
        # Path under .hg/
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # Path under the working directory root.
        return self.vfs.reljoin(self.root, f, *insidef)
825 825
826 826 def file(self, f):
827 827 if f[0] == '/':
828 828 f = f[1:]
829 829 return filelog.filelog(self.svfs, f)
830 830
    def changectx(self, changeid):
        # Explicit-name alias for repo[changeid].
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
837 837
    def setparents(self, p1, p2=nullid):
        """Set working directory parents, adjusting dirstate copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # Leaving a merge: drop copy records whose source and
                # destination are both unknown to the first parent.
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
854 854
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # Open a file relative to the working directory.
        return self.wvfs(f, mode)

    def _link(self, f):
        # Is working-directory file f a symlink?
        return self.wvfs.islink(f)
871 871
872 872 def _loadfilter(self, filter):
873 873 if filter not in self.filterpats:
874 874 l = []
875 875 for pat, cmd in self.ui.configitems(filter):
876 876 if cmd == '!':
877 877 continue
878 878 mf = matchmod.match(self.root, '', [pat])
879 879 fn = None
880 880 params = cmd
881 881 for name, filterfn in self._datafilters.iteritems():
882 882 if cmd.startswith(name):
883 883 fn = filterfn
884 884 params = cmd[len(name):].lstrip()
885 885 break
886 886 if not fn:
887 887 fn = lambda s, c, **kwargs: util.filter(s, c)
888 888 # Wrap old filters not supporting keyword arguments
889 889 if not inspect.getargspec(fn)[2]:
890 890 oldfn = fn
891 891 fn = lambda s, c, **kwargs: oldfn(s, c)
892 892 l.append((mf, fn, params))
893 893 self.filterpats[filter] = l
894 894 return self.filterpats[filter]
895 895
896 896 def _filter(self, filterpats, filename, data):
897 897 for mf, fn, cmd in filterpats:
898 898 if mf(filename):
899 899 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
900 900 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
901 901 break
902 902
903 903 return data
904 904
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached [encode] filter pipeline (applied by wread)
        return self._loadfilter('encode')
908 908
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached [decode] filter pipeline (applied by wwrite/wwritedata)
        return self._loadfilter('decode')
912 912
    def adddatafilter(self, name, filter):
        # register a named data filter; _loadfilter matches config commands
        # against these names by prefix
        self._datafilters[name] = filter
915 915
916 916 def wread(self, filename):
917 917 if self._link(filename):
918 918 data = self.wvfs.readlink(filename)
919 919 else:
920 920 data = self.wvfs.read(filename)
921 921 return self._filter(self._encodefilterpats, filename, data)
922 922
923 923 def wwrite(self, filename, data, flags):
924 924 """write ``data`` into ``filename`` in the working directory
925 925
926 926 This returns length of written (maybe decoded) data.
927 927 """
928 928 data = self._filter(self._decodefilterpats, filename, data)
929 929 if 'l' in flags:
930 930 self.wvfs.symlink(data, filename)
931 931 else:
932 932 self.wvfs.write(filename, data)
933 933 if 'x' in flags:
934 934 self.wvfs.setflags(filename, False, True)
935 935 return len(data)
936 936
    def wwritedata(self, filename, data):
        # run ``data`` through the 'decode' filters without writing anything
        return self._filter(self._decodefilterpats, filename, data)
939 939
940 940 def currenttransaction(self):
941 941 """return the current transaction or None if non exists"""
942 942 if self._transref:
943 943 tr = self._transref()
944 944 else:
945 945 tr = None
946 946
947 947 if tr and tr.running():
948 948 return tr
949 949 return None
950 950
951 951 def transaction(self, desc, report=None):
952 952 if (self.ui.configbool('devel', 'all-warnings')
953 953 or self.ui.configbool('devel', 'check-locks')):
954 954 l = self._lockref and self._lockref()
955 955 if l is None or not l.held:
956 scmutil.develwarn(self.ui, 'transaction with no lock')
956 self.ui.develwarn('transaction with no lock')
957 957 tr = self.currenttransaction()
958 958 if tr is not None:
959 959 return tr.nest()
960 960
961 961 # abort here if the journal already exists
962 962 if self.svfs.exists("journal"):
963 963 raise error.RepoError(
964 964 _("abandoned transaction found"),
965 965 hint=_("run 'hg recover' to clean up transaction"))
966 966
967 967 idbase = "%.40f#%f" % (random.random(), time.time())
968 968 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
969 969 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
970 970
971 971 self._writejournal(desc)
972 972 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
973 973 if report:
974 974 rp = report
975 975 else:
976 976 rp = self.ui.warn
977 977 vfsmap = {'plain': self.vfs} # root of .hg/
978 978 # we must avoid cyclic reference between repo and transaction.
979 979 reporef = weakref.ref(self)
980 980 def validate(tr):
981 981 """will run pre-closing hooks"""
982 982 pending = lambda: tr.writepending() and self.root or ""
983 983 reporef().hook('pretxnclose', throw=True, pending=pending,
984 984 txnname=desc, **tr.hookargs)
985 985
986 986 tr = transaction.transaction(rp, self.sopener, vfsmap,
987 987 "journal",
988 988 "undo",
989 989 aftertrans(renames),
990 990 self.store.createmode,
991 991 validator=validate)
992 992
993 993 tr.hookargs['txnid'] = txnid
994 994 # note: writing the fncache only during finalize mean that the file is
995 995 # outdated when running hooks. As fncache is used for streaming clone,
996 996 # this is not expected to break anything that happen during the hooks.
997 997 tr.addfinalize('flush-fncache', self.store.write)
998 998 def txnclosehook(tr2):
999 999 """To be run if transaction is successful, will schedule a hook run
1000 1000 """
1001 1001 def hook():
1002 1002 reporef().hook('txnclose', throw=False, txnname=desc,
1003 1003 **tr2.hookargs)
1004 1004 reporef()._afterlock(hook)
1005 1005 tr.addfinalize('txnclose-hook', txnclosehook)
1006 1006 def txnaborthook(tr2):
1007 1007 """To be run if transaction is aborted
1008 1008 """
1009 1009 reporef().hook('txnabort', throw=False, txnname=desc,
1010 1010 **tr2.hookargs)
1011 1011 tr.addabort('txnabort-hook', txnaborthook)
1012 1012 self._transref = weakref.ref(tr)
1013 1013 return tr
1014 1014
    def _journalfiles(self):
        # (vfs, name) pairs for every file backing a transaction journal;
        # store-level files live on svfs, the rest on vfs
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
1022 1022
    def undofiles(self):
        # the undo counterpart of every journal file
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1025 1025
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, description, bookmarks and
        phaseroots into journal.* files so an aborted transaction can be
        rolled back."""
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        # journal.desc records the pre-transaction length and description
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1037 1037
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                # drop caches so the restored on-disk state is re-read
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
1054 1054
1055 1055 def rollback(self, dryrun=False, force=False):
1056 1056 wlock = lock = None
1057 1057 try:
1058 1058 wlock = self.wlock()
1059 1059 lock = self.lock()
1060 1060 if self.svfs.exists("undo"):
1061 1061 return self._rollback(dryrun, force)
1062 1062 else:
1063 1063 self.ui.warn(_("no rollback information available\n"))
1064 1064 return 1
1065 1065 finally:
1066 1066 release(lock, wlock)
1067 1067
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Perform the actual rollback of the last transaction.

        Reads undo.desc to describe what is being undone, refuses (unless
        ``force``) to roll back a commit when the working directory is not
        on tip, then restores the undo.* snapshots.  Returns 0.
        """
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc is missing or unreadable: proceed, but anonymously
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only touch the dirstate if the rollback removed a working
        # directory parent from history
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1137 1137
    def invalidatecaches(self):
        # drop derived in-memory caches: tags, branch heads, volatile sets

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
1146 1146
    def invalidatevolatilesets(self):
        # drop filtered-revision and obsolescence marker caches
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1150 1150
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop the dirstate's own cached attributes first, then the
            # repo-level cached dirstate property itself
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    # cached attribute was never materialized
                    pass
            delattr(self.unfiltered(), 'dirstate')
1167 1167
1168 1168 def invalidate(self):
1169 1169 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1170 1170 for k in self._filecache:
1171 1171 # dirstate is invalidated separately in invalidatedirstate()
1172 1172 if k == 'dirstate':
1173 1173 continue
1174 1174
1175 1175 try:
1176 1176 delattr(unfiltered, k)
1177 1177 except AttributeError:
1178 1178 pass
1179 1179 self.invalidatecaches()
1180 1180 self.store.invalidatecaches()
1181 1181
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        # dirstate is excluded from invalidate(); handle it explicitly
        self.invalidatedirstate()
1188 1188
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lock file ``lockname`` on ``vfs``.

        When ``wait`` is true and the lock is held elsewhere, warn and
        retry with the configured ui.timeout (default 600 seconds).
        ``acquirefn`` runs after a successful acquisition; ``releasefn``
        is handed to the lock to run on release.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1205 1205
1206 1206 def _afterlock(self, callback):
1207 1207 """add a callback to be run when the repository is fully unlocked
1208 1208
1209 1209 The callback will be executed when the outermost lock is released
1210 1210 (with wlock being higher level than 'lock')."""
1211 1211 for ref in (self._wlockref, self._lockref):
1212 1212 l = ref and ref()
1213 1213 if l and l.held:
1214 1214 l.postrelease.append(callback)
1215 1215 break
1216 1216 else: # no lock have been found.
1217 1217 callback()
1218 1218
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant acquisition: reuse the already-held lock
            l.lock()
            return l

        def unlock():
            # on release, refresh loaded store file caches so later reads
            # notice on-disk changes made while the lock was held
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1241 1241
1242 1242 def wlock(self, wait=True):
1243 1243 '''Lock the non-store parts of the repository (everything under
1244 1244 .hg except .hg/store) and return a weak reference to the lock.
1245 1245
1246 1246 Use this before modifying files in .hg.
1247 1247
1248 1248 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1249 1249 'wlock' first to avoid a dead-lock hazard.'''
1250 1250 l = self._wlockref and self._wlockref()
1251 1251 if l is not None and l.held:
1252 1252 l.lock()
1253 1253 return l
1254 1254
1255 1255 # We do not need to check for non-waiting lock aquisition. Such
1256 1256 # acquisition would not cause dead-lock as they would just fail.
1257 1257 if wait and (self.ui.configbool('devel', 'all-warnings')
1258 1258 or self.ui.configbool('devel', 'check-locks')):
1259 1259 l = self._lockref and self._lockref()
1260 1260 if l is not None and l.held:
1261 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1261 self.ui.develwarn('"wlock" acquired after "lock"')
1262 1262
1263 1263 def unlock():
1264 1264 if self.dirstate.pendingparentchange():
1265 1265 self.dirstate.invalidate()
1266 1266 else:
1267 1267 self.dirstate.write()
1268 1268
1269 1269 self._filecache['dirstate'].refresh()
1270 1270
1271 1271 l = self._lock(self.vfs, "wlock", wait, unlock,
1272 1272 self.invalidatedirstate, _('working directory of %s') %
1273 1273 self.origroot)
1274 1274 self._wlockref = weakref.ref(l)
1275 1275 return l
1276 1276
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Records copy metadata and resolves the file's parents against the
        two parent manifests, appends the file name to ``changelist`` when
        a new filelog revision is created (or only flags changed), and
        returns the resulting filelog node.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # an existing filelog revision can be reused as-is
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1362 1362
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit (and empty commits are not allowed).
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # collect explicitly-named directories and fail on bad files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1535 1535
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changeset.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # only record removals of files known to a parent manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1622 1622
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # flush dirty phase data to disk before invalidation drops it
            self._phasecache.write()
1640 1640
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.

        # drop all caches so stripped revisions vanish from memory
        self.invalidate()
1674 1674
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        # delegate to the context; node=None means the working directory
        return self[node].walk(match)
1682 1682
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        # defaults compare the working directory parent ('.') against the
        # working directory itself (node2=None)
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
1689 1689
1690 1690 def heads(self, start=None):
1691 1691 heads = self.changelog.heads(start)
1692 1692 # sort the output in rev descending order
1693 1693 return sorted(heads, key=self.changelog.rev, reverse=True)
1694 1694
1695 1695 def branchheads(self, branch=None, start=None, closed=False):
1696 1696 '''return a (possibly filtered) list of heads for the given branch
1697 1697
1698 1698 Heads are returned in topological order, from newest to oldest.
1699 1699 If branch is None, use the dirstate branch.
1700 1700 If start is not None, return only heads reachable from start.
1701 1701 If closed is True, return heads that are marked as closed as well.
1702 1702 '''
1703 1703 if branch is None:
1704 1704 branch = self[None].branch()
1705 1705 branches = self.branchmap()
1706 1706 if branch not in branches:
1707 1707 return []
1708 1708 # the cache returns heads ordered lowest to highest
1709 1709 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1710 1710 if start is not None:
1711 1711 # filter out the heads that cannot be reached from startrev
1712 1712 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1713 1713 bheads = [h for h in bheads if h in fbheads]
1714 1714 return bheads
1715 1715
1716 1716 def branches(self, nodes):
1717 1717 if not nodes:
1718 1718 nodes = [self.changelog.tip()]
1719 1719 b = []
1720 1720 for n in nodes:
1721 1721 t = n
1722 1722 while True:
1723 1723 p = self.changelog.parents(n)
1724 1724 if p[1] != nullid or p[0] == nullid:
1725 1725 b.append((t, n, p[0], p[1]))
1726 1726 break
1727 1727 n = p[0]
1728 1728 return b
1729 1729
1730 1730 def between(self, pairs):
1731 1731 r = []
1732 1732
1733 1733 for top, bottom in pairs:
1734 1734 n, l, i = top, [], 0
1735 1735 f = 1
1736 1736
1737 1737 while n != bottom and n != nullid:
1738 1738 p = self.changelog.parents(n)[0]
1739 1739 if i == f:
1740 1740 l.append(n)
1741 1741 f = f * 2
1742 1742 n = p
1743 1743 i += 1
1744 1744
1745 1745 r.append(l)
1746 1746
1747 1747 return r
1748 1748
1749 1749 def checkpush(self, pushop):
1750 1750 """Extensions can override this function if additional checks have
1751 1751 to be performed before pushing, or call it if they override push
1752 1752 command.
1753 1753 """
1754 1754 pass
1755 1755
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.

        Cached on the unfiltered repo (see unfilteredpropertycache) so all
        views share the same hook container.
        """
        return util.hooks()
1762 1762
    def stream_in(self, remote, remotereqs):
        """Perform a streaming clone from ``remote``.

        ``remotereqs`` is the set of requirements expected for the incoming
        stream. Returns len(self.heads()) + 1 on success; raises util.Abort
        on a non-zero response code from the server.
        """
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        # the first line of the stream is a numeric status code
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

        # rest of fp is the raw stream payload
        self.applystreamclone(remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1
1786 1786
    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            # new format-related remote requirements
            # requirements from the streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                # rebuild a local branch cache from the remote's branchmap
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                  for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
        finally:
            lock.release()
1837 1837
1838 1838 def clone(self, remote, heads=[], stream=None):
1839 1839 '''clone remote repository.
1840 1840
1841 1841 keyword arguments:
1842 1842 heads: list of revs to clone (forces use of pull)
1843 1843 stream: use streaming clone if possible'''
1844 1844
1845 1845 # now, all clients that can request uncompressed clones can
1846 1846 # read repo formats supported by all servers that can serve
1847 1847 # them.
1848 1848
1849 1849 # if revlog format changes, client will have to check version
1850 1850 # and format flags on "stream" capability, and use
1851 1851 # uncompressed only if compatible.
1852 1852
1853 1853 if stream is None:
1854 1854 # if the server explicitly prefers to stream (for fast LANs)
1855 1855 stream = remote.capable('stream-preferred')
1856 1856
1857 1857 if stream and not heads:
1858 1858 # 'stream' means remote revlog format is revlogv1 only
1859 1859 if remote.capable('stream'):
1860 1860 self.stream_in(remote, set(('revlogv1',)))
1861 1861 else:
1862 1862 # otherwise, 'streamreqs' contains the remote revlog format
1863 1863 streamreqs = remote.capable('streamreqs')
1864 1864 if streamreqs:
1865 1865 streamreqs = set(streamreqs.split(','))
1866 1866 # if we support it, stream in and adjust our requirements
1867 1867 if not streamreqs - self.supportedformats:
1868 1868 self.stream_in(remote, streamreqs)
1869 1869
1870 1870 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1871 1871 try:
1872 1872 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1873 1873 ret = exchange.pull(self, remote, heads).cgresult
1874 1874 finally:
1875 1875 self.ui.restoreconfig(quiet)
1876 1876 return ret
1877 1877
    def pushkey(self, namespace, key, old, new):
        """Set ``key`` to ``new`` (if currently ``old``) in ``namespace``.

        Fires the prepushkey hook before and the pushkey hook after the
        operation. Returns the pushkey backend result, or False when the
        prepushkey hook aborts.
        """
        try:
            # expose pending transaction data to the prepushkey hook
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        # run the post hook once the lock is released
        self._afterlock(runhook)
        return ret
1903 1903
1904 1904 def listkeys(self, namespace):
1905 1905 self.hook('prelistkeys', throw=True, namespace=namespace)
1906 1906 self.ui.debug('listing keys for "%s"\n' % namespace)
1907 1907 values = pushkey.list(self, namespace)
1908 1908 self.hook('listkeys', namespace=namespace, values=values)
1909 1909 return values
1910 1910
1911 1911 def debugwireargs(self, one, two, three=None, four=None, five=None):
1912 1912 '''used to test argument passing over the wire'''
1913 1913 return "%s %s %s %s %s" % (one, two, three, four, five)
1914 1914
1915 1915 def savecommitmessage(self, text):
1916 1916 fp = self.vfs('last-message.txt', 'wb')
1917 1917 try:
1918 1918 fp.write(text)
1919 1919 finally:
1920 1920 fp.close()
1921 1921 return self.pathto(fp.name[len(self.root) + 1:])
1922 1922
# used to avoid circular references so destructors work
def aftertrans(files):
    '''return a callback that renames each (vfs, src, dest) entry in files'''
    # snapshot as tuples so later mutation of ``files`` cannot affect us
    pending = [tuple(t) for t in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1933 1933
def undoname(fn):
    '''map a journal file path to the matching undo file path'''
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is rewritten
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1938 1938
def instance(ui, path, create):
    # construct a localrepository for ``path`` (URL form accepted; it is
    # converted to a local filesystem path first)
    return localrepository(ui, util.urllocalpath(path), create)
1941 1941
def islocal(path):
    '''repositories handled by this module are always local'''
    return True
@@ -1,1166 +1,1156 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat, inspect
13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self):
        # __repr__ takes no extra parameters; the former *args/**kwargs
        # were never supplied by repr() and only obscured the signature
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subrepo path to the context it should be read from,
    # preferring ctx1 when the path exists in both. Subpaths from ctx2
    # matter when the .hgsub file has been modified (in ctx2) but not yet
    # committed (in ctx1).
    owner = {}
    for path in ctx2.substate:
        owner[path] = ctx2
    for path in ctx1.substate:
        owner[path] = ctx1

    missing = set()
    for path in ctx2.substate:
        if path not in ctx1.substate:
            del owner[path]
            missing.add(path)

    for path in sorted(owner):
        yield path, owner[path].sub(path)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for path in missing:
        yield path, ctx2.nullsub(path, ctx1)
100 100
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secret = []
    for n in (excluded or []):
        # discovery should not have included the filtered revision,
        # we have to explicitly exclude it until discovery is cleanup.
        if n not in repo:
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secret.append(n)

    if secret:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secret))
    else:
        ui.status(_("no changes found\n"))
121 121
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in ':\0\n\r':
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise util.Abort(_("cannot use an integer as a name"))
135 135
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(c in f for c in '\r\n'):
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140 140
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
152 152
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    asbool = util.parsebool(val)
    # aborting is mandatory on Windows
    abort = os.name == 'nt' or lowered == 'abort'
    warn = asbool or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
165 165
class casecollisionauditor(object):
    # Detects when a newly added file's lowercased name collides with an
    # already-tracked file ("possible case-folding collision"), and either
    # warns or aborts depending on the ``abort`` flag.
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lowercase all tracked names in one pass by joining on NUL
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # audit a single filename ``f``; may raise util.Abort or warn
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
189 189
190 def develwarn(tui, msg):
191 """issue a developer warning message"""
192 msg = 'devel-warn: ' + msg
193 if tui.tracebackflag:
194 util.debugstacktrace(msg, 2)
195 else:
196 curframe = inspect.currentframe()
197 calframe = inspect.getouterframes(curframe, 2)
198 tui.write_err('%s at: %s:%s (%s)\n' % ((msg,) + calframe[2][1:4]))
199
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    hidden = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not hidden:
        return None
    hasher = util.sha1()
    for rev in hidden:
        hasher.update('%s;' % rev)
    return hasher.digest()
223 213
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind so subsequent calls on this instance go straight to __call__
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed)

    def read(self, path):
        # return the entire file content as a (byte) string
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def readlines(self, path, mode='rb'):
        # return the file content as a list of lines
        fp = self(path, mode=mode)
        try:
            return fp.readlines()
        finally:
            fp.close()

    def write(self, path, data):
        # overwrite ``path`` with ``data``; returns the write() result
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def writelines(self, path, data, mode='wb', notindexed=False):
        fp = self(path, mode=mode, notindexed=notindexed)
        try:
            return fp.writelines(data)
        finally:
            fp.close()

    def append(self, path, data):
        # append ``data`` to ``path``
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()

    # The methods below mirror os/util functions, resolving ``path``
    # relative to this vfs' root via self.join().

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        # returns (fd, name) with ``name`` kept relative to ``dir`` when
        # a directory was given
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst):
        return util.rename(self.join(src), self.join(dst))

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)
423 413
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        # switching auditing swaps the path auditor for a no-op
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a freshly created file, if configured
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the hardlink count of the target so we can break
        # hardlinks (copy-on-write) before modifying a shared file
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hardlink by replacing with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # filesystem does not support symlinks: store the target as
            # file content instead
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
544 534 opener = vfs
545 535
class auditvfs(object):
    # wrapper base that forwards the ``mustaudit`` property to the
    # wrapped vfs
    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
557 547
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # open through the wrapped vfs after rewriting the filename
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            # filter the relative path, then resolve against the wrapped vfs
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)
573 563
574 564 filteropener = filtervfs
575 565
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
586 576
587 577
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    # samestat is used to detect already-visited directories when
    # following symlinks (not available on all platforms)
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; returns True if it was new
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the symlink with loop protection
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
635 625
def osrcpath():
    '''return default os-specific hgrc search path'''
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        # bundled default.d/*.rc files come first
        path = [os.path.join(defaultpath, f)
                for f, kind in osutil.listdir(defaultpath)
                if f.endswith('.rc')]
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
648 638
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
674 664
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working directory (rev None) is mapped to len(repo)
    return len(repo) if rev is None else rev
681 671
def revsingle(repo, revspec, default='.'):
    '''resolve a single revspec to a changectx, falling back to ``default``
    for an empty spec'''
    if not revspec and revspec != 0:
        return repo[default]
    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
690 680
def revpair(repo, revs):
    # resolve a list of revspecs to a (firstnode, secondnode-or-None) pair
    if not revs:
        # no specs: working directory parent and no second rev
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints of the set; min/max are cheaper when the set
    # is known to be sorted in either direction
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a single non-range spec resolving to one rev yields no second node
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
716 706
# separator used by old-style "start:end" revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty spec (but not the integer 0) falls back to defval
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    subsets = []

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                subsets.append(revset.baseset([spec]))
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                if start < end:
                    l = revset.spanset(repo, start, end + 1)
                else:
                    # descending range: spanset iterates downward
                    l = revset.spanset(repo, start, end - 1)
                subsets.append(l)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                subsets.append(revset.baseset([rev]))
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        subsets.append(m(repo))

    return revset._combinesets(subsets)
773 763
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume the shell has already done it.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # an explicit kind: prefix disables glob expansion
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # no match on disk: keep the pattern verbatim
            expanded.append(kindpat)
    return expanded
792 782
def matchandpats(ctx, pats=None, opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.

    The matcher will warn about bad matches, unless an alternate badfn
    callback is provided.'''
    # avoid mutable default arguments ([] / {}): the objects would be
    # shared across every call that relies on the defaults
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default bad-file handler: report on the repo ui
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # a match-everything matcher means no effective patterns
        pats = []
    return m, pats
815 805
def match(ctx, pats=None, opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # normalize the former mutable defaults ([] / {}) here so the call
    # below behaves identically regardless of matchandpats' own defaults
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
819 809
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
823 813
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
827 817
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    # Schedule new files for addition and missing files for removal,
    # recursing into matching subrepos, with optional rename detection
    # (--similarity).  Returns 1 if any explicitly named file was bad.
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # True when the matcher names the subrepo itself or a file in it
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                # the subrepo state refers to a revision we don't have
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only warn about files the user named explicitly; remember all
        # of them so the final exit status can be computed below
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
889 879
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # the lambda closes over 'rejected', which is assigned on the next
    # line; it is only invoked later, once the list exists
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # report failure if any of the named files was rejected by the matcher
    for f in rejected:
        if f in m.files():
            return 1
    return 0
918 908
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    # refuses paths that escape the repository (e.g. through symlinks)
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dirstate states: '?' untracked, 'r' removed, 'a' added;
        # st is the stat result and is falsy when the file is gone
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk: it reappeared
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
947 937
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # rename detection disabled
        return renames
    ui = repo.ui
    for src, dst, score in similar.findrenames(repo, added, removed,
                                               similarity):
        exact = matcher.exact(src) and matcher.exact(dst)
        if ui.verbose or not exact:
            ui.status(_('recording removal of %s as rename to %s '
                        '(%d%% similar)\n') %
                      (matcher.rel(src), matcher.rel(dst), score * 100))
        renames[dst] = src
    return renames
962 952
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # hold the working-directory lock across the whole batch of updates
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
975 965
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            # the copy cancels out: just make sure dst is tracked again
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only just added: there is no committed revision
            # to record copy metadata against, so just add dst
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
994 984
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for entry in requirements:
        if entry in supported:
            continue
        if not entry or not entry[0].isalnum():
            # a blank or garbage line means the file itself is damaged
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(entry)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1013 1003
def writerequires(opener, requirements):
    """Write the requirements, one per line and sorted, via opener.

    opener is called as opener("requires", "w") and must return a
    writable file object."""
    reqfile = opener("requires", "w")
    try:
        for r in sorted(requirements):
            reqfile.write("%s\n" % r)
    finally:
        # previously the file leaked if a write raised; always close it
        reqfile.close()
1019 1009
class filecachesubentry(object):
    # Tracks the stat state of a single file so filecache can detect
    # modifications between accesses.
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so future changed() calls compare against current state
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist; any other OS error
        # is re-raised
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
1074 1064
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""
    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1091 1081
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        # fast path: value already cached in the instance __dict__
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # an underlying file changed: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        # direct assignment also records a cache entry so the __dict__
        # fast path in __get__ stays consistent
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # invalidate the cached value: the next access recomputes it
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
@@ -1,1016 +1,1027 b''
1 1 # ui.py - user interface bits for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 import inspect
8 9 from i18n import _
9 10 import errno, getpass, os, socket, sys, tempfile, traceback
10 11 import config, scmutil, util, error, formatter, progress
11 12 from node import hex
12 13
# Sample hgrc file contents, keyed by scope ('user', 'cloned', 'local',
# 'global'); offered as templates when a config file is created/edited.
samplehgrcs = {
    'user':
"""# example user config (see "hg help config" for more info)
[ui]
# name and email, e.g.
# username = Jane Doe <jdoe@example.com>
username =

[extensions]
# uncomment these lines to enable some popular extensions
# (see "hg help extensions" for more info)
#
# pager =
# progress =
# color =""",

    'cloned':
"""# example repository config (see "hg help config" for more info)
[paths]
default = %s

# path aliases to other clones of this repo in URLs or filesystem paths
# (see "hg help config.paths" for more info)
#
# default-push = ssh://jdoe@example.net/hg/jdoes-fork
# my-fork = ssh://jdoe@example.net/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone

[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <jdoe@example.com>
""",

    'local':
"""# example repository config (see "hg help config" for more info)
[paths]
# path aliases to other clones of this repo in URLs or filesystem paths
# (see "hg help config.paths" for more info)
#
# default = http://example.com/hg/example-repo
# default-push = ssh://jdoe@example.net/hg/jdoes-fork
# my-fork = ssh://jdoe@example.net/hg/jdoes-fork
# my-clone = /home/jdoe/jdoes-clone

[ui]
# name and email (local to this repository, optional), e.g.
# username = Jane Doe <jdoe@example.com>
""",

    'global':
"""# example system-wide hg config (see "hg help config" for more info)

[extensions]
# uncomment these lines to enable some popular extensions
# (see "hg help extensions" for more info)
#
# blackbox =
# progress =
# color =
# pager =""",
}
74 75
75 76 class ui(object):
    def __init__(self, src=None):
        # When src is given, act as a copy constructor: share the I/O
        # streams and environment, and duplicate src's config state.
        # _buffers: used for temporary capture of output
        self._buffers = []
        # _bufferstates:
        #   should the temporary capture include stderr and subprocess output
        self._bufferstates = []
        self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
        self._reportuntrusted = True
        self._ocfg = config.config() # overlay
        self._tcfg = config.config() # trusted
        self._ucfg = config.config() # untrusted
        self._trustusers = set()
        self._trustgroups = set()
        self.callhooks = True

        if src:
            self.fout = src.fout
            self.ferr = src.ferr
            self.fin = src.fin

            self._tcfg = src._tcfg.copy()
            self._ucfg = src._ucfg.copy()
            self._ocfg = src._ocfg.copy()
            self._trustusers = src._trustusers.copy()
            self._trustgroups = src._trustgroups.copy()
            self.environ = src.environ
            self.callhooks = src.callhooks
            # re-derive flags (verbose/quiet/trust) from the copied config
            self.fixconfig()
        else:
            self.fout = sys.stdout
            self.ferr = sys.stderr
            self.fin = sys.stdin

            # shared read-only environment
            self.environ = os.environ
            # we always trust global config files
            for f in scmutil.rcpath():
                self.readconfig(f, trust=True)
114 115
    def copy(self):
        # Return a new ui of the same class, sharing streams and
        # duplicating config state (see __init__'s src handling).
        return self.__class__(self)

    def formatter(self, topic, opts):
        # Build an output formatter for the given command topic.
        return formatter.formatter(self, topic, opts)
120 121
    def _trusted(self, fp, f):
        """Return True if config file f (open as fp) comes from a trusted
        source: owned by us, or by a user/group listed in [trusted]."""
        st = util.fstat(fp)
        if util.isowner(st):
            return True

        tusers, tgroups = self._trustusers, self._trustgroups
        if '*' in tusers or '*' in tgroups:
            # wildcard: everything is trusted
            return True

        user = util.username(st.st_uid)
        group = util.groupname(st.st_gid)
        if user in tusers or group in tgroups or user == util.username():
            return True

        if self._reportuntrusted:
            self.warn(_('not trusting file %s from untrusted '
                        'user %s, group %s\n') % (f, user, group))
        return False
139 140
    def readconfig(self, filename, root=None, trust=False,
                   sections=None, remap=None):
        """Read filename into the config layers.

        Untrusted files only populate the untrusted layer.  Missing files
        are silently ignored unless specific sections were requested.
        HGPLAIN scrubs options that affect output or behavior."""
        try:
            fp = open(filename)
        except IOError:
            if not sections: # ignore unless we were looking for something
                return
            raise

        cfg = config.config()
        trusted = sections or trust or self._trusted(fp, filename)

        try:
            cfg.read(filename, fp, sections=sections, remap=remap)
            fp.close()
        except error.ConfigError, inst:
            if trusted:
                raise
            # a broken untrusted file is only worth a warning
            self.warn(_("ignored: %s\n") % str(inst))

        if self.plain():
            # plain mode: drop options that change output or behavior
            for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
                      'logtemplate', 'statuscopies', 'style',
                      'traceback', 'verbose'):
                if k in cfg['ui']:
                    del cfg['ui'][k]
            for k, v in cfg.items('defaults'):
                del cfg['defaults'][k]
            # Don't remove aliases from the configuration if in the exceptionlist
            if self.plain('alias'):
                for k, v in cfg.items('alias'):
                    del cfg['alias'][k]
            if self.plain('revsetalias'):
                for k, v in cfg.items('revsetalias'):
                    del cfg['revsetalias'][k]

        if trusted:
            # the overlay (-​-config) always wins over file contents
            self._tcfg.update(cfg)
            self._tcfg.update(self._ocfg)
        self._ucfg.update(cfg)
        self._ucfg.update(self._ocfg)

        if root is None:
            root = os.path.expanduser('~')
        self.fixconfig(root=root)
185 186
    def fixconfig(self, root=None, section=None):
        """Re-derive internal state after config changed (all sections
        when section is None)."""
        if section in (None, 'paths'):
            # expand vars and ~
            # translate paths relative to root (or home) into absolute paths
            root = root or os.getcwd()
            for c in self._tcfg, self._ucfg, self._ocfg:
                for n, p in c.items('paths'):
                    if not p:
                        continue
                    if '%%' in p:
                        self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
                                  % (n, p, self.configsource('paths', n)))
                        p = p.replace('%%', '%')
                    p = util.expandpath(p)
                    if not util.hasscheme(p) and not os.path.isabs(p):
                        p = os.path.normpath(os.path.join(root, p))
                    c.set("paths", n, p)

        if section in (None, 'ui'):
            # update ui options
            self.debugflag = self.configbool('ui', 'debug')
            self.verbose = self.debugflag or self.configbool('ui', 'verbose')
            self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
            if self.verbose and self.quiet:
                # contradictory flags cancel each other out
                self.quiet = self.verbose = False
            self._reportuntrusted = self.debugflag or self.configbool("ui",
                "report_untrusted", True)
            self.tracebackflag = self.configbool('ui', 'traceback', False)

        if section in (None, 'trusted'):
            # update trust information
            self._trustusers.update(self.configlist('trusted', 'users'))
            self._trustgroups.update(self.configlist('trusted', 'groups'))
219 220
220 221 def backupconfig(self, section, item):
221 222 return (self._ocfg.backup(section, item),
222 223 self._tcfg.backup(section, item),
223 224 self._ucfg.backup(section, item),)
224 225 def restoreconfig(self, data):
225 226 self._ocfg.restore(data[0])
226 227 self._tcfg.restore(data[1])
227 228 self._ucfg.restore(data[2])
228 229
229 230 def setconfig(self, section, name, value, source=''):
230 231 for cfg in (self._ocfg, self._tcfg, self._ucfg):
231 232 cfg.set(section, name, value, source)
232 233 self.fixconfig(section=section)
233 234
234 235 def _data(self, untrusted):
235 236 return untrusted and self._ucfg or self._tcfg
236 237
237 238 def configsource(self, section, name, untrusted=False):
238 239 return self._data(untrusted).source(section, name) or 'none'
239 240
    def config(self, section, name, default=None, untrusted=False):
        """Return the config value for section.name, or default.

        name may be a list of alternate names; the first one that is set
        wins."""
        if isinstance(name, list):
            alternates = name
        else:
            alternates = [name]

        for n in alternates:
            value = self._data(untrusted).get(section, n, None)
            if value is not None:
                name = n
                break
        else:
            value = default

        if self.debugflag and not untrusted and self._reportuntrusted:
            # in debug mode, point out untrusted values that differ
            for n in alternates:
                uvalue = self._ucfg.get(section, n)
                if uvalue is not None and uvalue != value:
                    self.debug("ignoring untrusted configuration option "
                               "%s.%s = %s\n" % (section, n, uvalue))
        return value
261 262
    def configpath(self, section, name, default=None, untrusted=False):
        'get a path config item, expanded relative to repo root or config file'
        v = self.config(section, name, default, untrusted)
        if v is None:
            return None
        # NOTE(review): with 'or', the join below is skipped only when the
        # value is absolute AND contains '://'; confirm this isn't meant to
        # be 'and' (i.e. skip for absolute paths or URLs).
        if not os.path.isabs(v) or "://" not in v:
            src = self.configsource(section, name, untrusted)
            if ':' in src:
                # src looks like "filename:line": resolve relative to it
                base = os.path.dirname(src.rsplit(':')[0])
                v = os.path.join(base, os.path.expanduser(v))
        return v
273 274
    def configbool(self, section, name, default=False, untrusted=False):
        """parse a configuration element as a boolean

        >>> u = ui(); s = 'foo'
        >>> u.setconfig(s, 'true', 'yes')
        >>> u.configbool(s, 'true')
        True
        >>> u.setconfig(s, 'false', 'no')
        >>> u.configbool(s, 'false')
        False
        >>> u.configbool(s, 'unknown')
        False
        >>> u.configbool(s, 'unknown', True)
        True
        >>> u.setconfig(s, 'invalid', 'somevalue')
        >>> u.configbool(s, 'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not a boolean ('somevalue')
        """

        v = self.config(section, name, None, untrusted)
        if v is None:
            return default
        if isinstance(v, bool):
            # value was set programmatically via setconfig: use as-is
            return v
        b = util.parsebool(v)
        if b is None:
            raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
                                    % (section, name, v))
        return b
305 306
    def configint(self, section, name, default=None, untrusted=False):
        """parse a configuration element as an integer

        >>> u = ui(); s = 'foo'
        >>> u.setconfig(s, 'int1', '42')
        >>> u.configint(s, 'int1')
        42
        >>> u.setconfig(s, 'int2', '-42')
        >>> u.configint(s, 'int2')
        -42
        >>> u.configint(s, 'unknown', 7)
        7
        >>> u.setconfig(s, 'invalid', 'somevalue')
        >>> u.configint(s, 'invalid')
        Traceback (most recent call last):
            ...
        ConfigError: foo.invalid is not an integer ('somevalue')
        """

        v = self.config(section, name, None, untrusted)
        if v is None:
            return default
        try:
            return int(v)
        except ValueError:
            # surface a config error instead of a raw ValueError
            raise error.ConfigError(_("%s.%s is not an integer ('%s')")
                                    % (section, name, v))
333 334
334 335 def configbytes(self, section, name, default=0, untrusted=False):
335 336 """parse a configuration element as a quantity in bytes
336 337
337 338 Units can be specified as b (bytes), k or kb (kilobytes), m or
338 339 mb (megabytes), g or gb (gigabytes).
339 340
340 341 >>> u = ui(); s = 'foo'
341 342 >>> u.setconfig(s, 'val1', '42')
342 343 >>> u.configbytes(s, 'val1')
343 344 42
344 345 >>> u.setconfig(s, 'val2', '42.5 kb')
345 346 >>> u.configbytes(s, 'val2')
346 347 43520
347 348 >>> u.configbytes(s, 'unknown', '7 MB')
348 349 7340032
349 350 >>> u.setconfig(s, 'invalid', 'somevalue')
350 351 >>> u.configbytes(s, 'invalid')
351 352 Traceback (most recent call last):
352 353 ...
353 354 ConfigError: foo.invalid is not a byte quantity ('somevalue')
354 355 """
355 356
356 357 value = self.config(section, name)
357 358 if value is None:
358 359 if not isinstance(default, str):
359 360 return default
360 361 value = default
361 362 try:
362 363 return util.sizetoint(value)
363 364 except error.ParseError:
364 365 raise error.ConfigError(_("%s.%s is not a byte quantity ('%s')")
365 366 % (section, name, value))
366 367
    def configlist(self, section, name, default=None, untrusted=False):
        """parse a configuration element as a list of comma/space separated
        strings

        >>> u = ui(); s = 'foo'
        >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
        >>> u.configlist(s, 'list1')
        ['this', 'is', 'a small', 'test']
        """

        # The helpers below form a tiny state machine: each step returns
        # (next-parser-or-None, parts, new-offset).

        def _parse_plain(parts, s, offset):
            # unquoted text: whitespace/commas separate items
            whitespace = False
            while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
                whitespace = True
                offset += 1
            if offset >= len(s):
                return None, parts, offset
            if whitespace:
                parts.append('')
            if s[offset] == '"' and not parts[-1]:
                # a quote at the start of an item switches to quoted mode
                return _parse_quote, parts, offset + 1
            elif s[offset] == '"' and parts[-1][-1] == '\\':
                # backslash-escaped quote inside a plain item
                parts[-1] = parts[-1][:-1] + s[offset]
                return _parse_plain, parts, offset + 1
            parts[-1] += s[offset]
            return _parse_plain, parts, offset + 1

        def _parse_quote(parts, s, offset):
            if offset < len(s) and s[offset] == '"': # ""
                parts.append('')
                offset += 1
                while offset < len(s) and (s[offset].isspace() or
                        s[offset] == ','):
                    offset += 1
                return _parse_plain, parts, offset

            while offset < len(s) and s[offset] != '"':
                if (s[offset] == '\\' and offset + 1 < len(s)
                        and s[offset + 1] == '"'):
                    offset += 1
                    parts[-1] += '"'
                else:
                    parts[-1] += s[offset]
                offset += 1

            if offset >= len(s):
                # unterminated quote: reparse what we collected as plain
                real_parts = _configlist(parts[-1])
                if not real_parts:
                    parts[-1] = '"'
                else:
                    real_parts[0] = '"' + real_parts[0]
                    parts = parts[:-1]
                    parts.extend(real_parts)
                return None, parts, offset

            offset += 1
            while offset < len(s) and s[offset] in [' ', ',']:
                offset += 1

            if offset < len(s):
                if offset + 1 == len(s) and s[offset] == '"':
                    # trailing lone quote attaches to the current item
                    parts[-1] += '"'
                    offset += 1
                else:
                    parts.append('')
            else:
                return None, parts, offset

            return _parse_plain, parts, offset

        def _configlist(s):
            s = s.rstrip(' ,')
            if not s:
                return []
            parser, parts, offset = _parse_plain, [''], 0
            while parser:
                parser, parts, offset = parser(parts, s, offset)
            return parts

        result = self.config(section, name, untrusted=untrusted)
        if result is None:
            result = default or []
        if isinstance(result, basestring):
            result = _configlist(result.lstrip(' ,\n'))
            if result is None:
                result = default or []
        return result
454 455
455 456 def has_section(self, section, untrusted=False):
456 457 '''tell whether section exists in config.'''
457 458 return section in self._data(untrusted)
458 459
    def configitems(self, section, untrusted=False):
        """Return all (name, value) pairs set in section."""
        items = self._data(untrusted).items(section)
        if self.debugflag and not untrusted and self._reportuntrusted:
            # in debug mode, report untrusted entries that differ
            for k, v in self._ucfg.items(section):
                if self._tcfg.get(section, k) != v:
                    self.debug("ignoring untrusted configuration option "
                               "%s.%s = %s\n" % (section, k, v))
        return items
467 468
468 469 def walkconfig(self, untrusted=False):
469 470 cfg = self._data(untrusted)
470 471 for section in cfg.sections():
471 472 for name, value in self.configitems(section, untrusted):
472 473 yield section, name, value
473 474
474 475 def plain(self, feature=None):
475 476 '''is plain mode active?
476 477
477 478 Plain mode means that all configuration variables which affect
478 479 the behavior and output of Mercurial should be
479 480 ignored. Additionally, the output should be stable,
480 481 reproducible and suitable for use in scripts or applications.
481 482
482 483 The only way to trigger plain mode is by setting either the
483 484 `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
484 485
485 486 The return value can either be
486 487 - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
487 488 - True otherwise
488 489 '''
489 490 if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
490 491 return False
491 492 exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
492 493 if feature and exceptions:
493 494 return feature not in exceptions
494 495 return True
495 496
    def username(self):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
        and stop searching if one of these is set.
        If not found and ui.askusername is True, ask the user, else use
        ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".

        Raises util.Abort when no username can be determined, or when the
        result contains a newline.
        """
        user = os.environ.get("HGUSER")
        if user is None:
            user = self.config("ui", ["username", "user"])
            if user is not None:
                # config values may reference environment variables
                user = os.path.expandvars(user)
        if user is None:
            user = os.environ.get("EMAIL")
        if user is None and self.configbool("ui", "askusername"):
            user = self.prompt(_("enter a commit username:"), default=None)
        if user is None and not self.interactive():
            try:
                # last resort: synthesize user@host from the system account
                user = '%s@%s' % (util.getuser(), socket.getfqdn())
                self.warn(_("no username found, using '%s' instead\n") % user)
            except KeyError:
                # presumably getuser() raises KeyError when the uid has no
                # passwd entry — TODO confirm against util.getuser
                pass
        if not user:
            raise util.Abort(_('no username supplied'),
                             hint=_('use "hg config --edit" '
                                    'to set your username'))
        if "\n" in user:
            # a newline would corrupt the commit metadata
            raise util.Abort(_("username %s contains a newline\n") % repr(user))
        return user
526 527
527 528 def shortuser(self, user):
528 529 """Return a short representation of a user name or email address."""
529 530 if not self.verbose:
530 531 user = util.shortuser(user)
531 532 return user
532 533
533 534 def expandpath(self, loc, default=None):
534 535 """Return repository location relative to cwd or from [paths]"""
535 536 if util.hasscheme(loc) or os.path.isdir(os.path.join(loc, '.hg')):
536 537 return loc
537 538
538 539 p = self.paths.getpath(loc, default=default)
539 540 if p:
540 541 return p.loc
541 542 return loc
542 543
    @util.propertycache
    def paths(self):
        # collection of named locations from the [paths] config section,
        # built lazily and cached on the instance by util.propertycache
        return paths(self)
546 547
547 548 def pushbuffer(self, error=False, subproc=False):
548 549 """install a buffer to capture standard output of the ui object
549 550
550 551 If error is True, the error output will be captured too.
551 552
552 553 If subproc is True, output from subprocesses (typically hooks) will be
553 554 captured too."""
554 555 self._buffers.append([])
555 556 self._bufferstates.append((error, subproc))
556 557
557 558 def popbuffer(self, labeled=False):
558 559 '''pop the last buffer and return the buffered output
559 560
560 561 If labeled is True, any labels associated with buffered
561 562 output will be handled. By default, this has no effect
562 563 on the output returned, but extensions and GUI tools may
563 564 handle this argument and returned styled output. If output
564 565 is being buffered so it can be captured and parsed or
565 566 processed, labeled should not be set to True.
566 567 '''
567 568 self._bufferstates.pop()
568 569 return "".join(self._buffers.pop())
569 570
570 571 def write(self, *args, **opts):
571 572 '''write args to output
572 573
573 574 By default, this method simply writes to the buffer or stdout,
574 575 but extensions or GUI tools may override this method,
575 576 write_err(), popbuffer(), and label() to style output from
576 577 various parts of hg.
577 578
578 579 An optional keyword argument, "label", can be passed in.
579 580 This should be a string containing label names separated by
580 581 space. Label names take the form of "topic.type". For example,
581 582 ui.debug() issues a label of "ui.debug".
582 583
583 584 When labeling output for a specific command, a label of
584 585 "cmdname.type" is recommended. For example, status issues
585 586 a label of "status.modified" for modified files.
586 587 '''
587 588 self._progclear()
588 589 if self._buffers:
589 590 self._buffers[-1].extend([str(a) for a in args])
590 591 else:
591 592 for a in args:
592 593 self.fout.write(str(a))
593 594
    def write_err(self, *args, **opts):
        '''write args to the error stream (or to the capture buffer when
        one is installed with error capturing enabled)

        Accepts the same optional "label" keyword argument as write().
        '''
        self._progclear()
        try:
            if self._bufferstates and self._bufferstates[-1][0]:
                # active buffer captures errors too; route through write()
                return self.write(*args, **opts)
            # flush stdout first so interleaved out/err stay ordered
            if not getattr(self.fout, 'closed', False):
                self.fout.flush()
            for a in args:
                self.ferr.write(str(a))
            # stderr may be buffered under win32 when redirected to files,
            # including stdout.
            if not getattr(self.ferr, 'closed', False):
                self.ferr.flush()
        except IOError, inst:
            # a broken pipe/terminal on the error stream is not fatal
            if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
                raise
610 611
611 612 def flush(self):
612 613 try: self.fout.flush()
613 614 except (IOError, ValueError): pass
614 615 try: self.ferr.flush()
615 616 except (IOError, ValueError): pass
616 617
617 618 def _isatty(self, fh):
618 619 if self.configbool('ui', 'nontty', False):
619 620 return False
620 621 return util.isatty(fh)
621 622
622 623 def interactive(self):
623 624 '''is interactive input allowed?
624 625
625 626 An interactive session is a session where input can be reasonably read
626 627 from `sys.stdin'. If this function returns false, any attempt to read
627 628 from stdin should fail with an error, unless a sensible default has been
628 629 specified.
629 630
630 631 Interactiveness is triggered by the value of the `ui.interactive'
631 632 configuration variable or - if it is unset - when `sys.stdin' points
632 633 to a terminal device.
633 634
634 635 This function refers to input only; for output, see `ui.formatted()'.
635 636 '''
636 637 i = self.configbool("ui", "interactive", None)
637 638 if i is None:
638 639 # some environments replace stdin without implementing isatty
639 640 # usually those are non-interactive
640 641 return self._isatty(self.fin)
641 642
642 643 return i
643 644
644 645 def termwidth(self):
645 646 '''how wide is the terminal in columns?
646 647 '''
647 648 if 'COLUMNS' in os.environ:
648 649 try:
649 650 return int(os.environ['COLUMNS'])
650 651 except ValueError:
651 652 pass
652 653 return util.termwidth()
653 654
654 655 def formatted(self):
655 656 '''should formatted output be used?
656 657
657 658 It is often desirable to format the output to suite the output medium.
658 659 Examples of this are truncating long lines or colorizing messages.
659 660 However, this is not often not desirable when piping output into other
660 661 utilities, e.g. `grep'.
661 662
662 663 Formatted output is triggered by the value of the `ui.formatted'
663 664 configuration variable or - if it is unset - when `sys.stdout' points
664 665 to a terminal device. Please note that `ui.formatted' should be
665 666 considered an implementation detail; it is not intended for use outside
666 667 Mercurial or its extensions.
667 668
668 669 This function refers to output only; for input, see `ui.interactive()'.
669 670 This function always returns false when in plain mode, see `ui.plain()'.
670 671 '''
671 672 if self.plain():
672 673 return False
673 674
674 675 i = self.configbool("ui", "formatted", None)
675 676 if i is None:
676 677 # some environments replace stdout without implementing isatty
677 678 # usually those are non-interactive
678 679 return self._isatty(self.fout)
679 680
680 681 return i
681 682
    def _readline(self, prompt=''):
        """Read one line of user input, with line editing when stdin is a
        terminal.

        ``prompt`` is emitted through write() so subclasses can style it.
        """
        if self._isatty(self.fin):
            try:
                # magically add command line editing support, where
                # available
                import readline
                # force demandimport to really load the module
                readline.read_history_file
                # windows sometimes raises something other than ImportError
            except Exception:
                pass

        # call write() so output goes through subclassed implementation
        # e.g. color extension on Windows
        self.write(prompt)

        # instead of trying to emulate raw_input, swap (self.fin,
        # self.fout) with (sys.stdin, sys.stdout)
        oldin = sys.stdin
        oldout = sys.stdout
        sys.stdin = self.fin
        sys.stdout = self.fout
        # prompt ' ' must exist; otherwise readline may delete entire line
        # - http://bugs.python.org/issue12833
        line = raw_input(' ')
        sys.stdin = oldin
        sys.stdout = oldout

        # When stdin is in binary mode on Windows, it can cause
        # raw_input() to emit an extra trailing carriage return
        if os.linesep == '\r\n' and line and line[-1] == '\r':
            line = line[:-1]
        return line
715 716
    def prompt(self, msg, default="y"):
        """Prompt user with msg, read response.
        If ui is not interactive, the default is returned.

        Raises util.Abort when input ends unexpectedly (EOF).
        """
        if not self.interactive():
            # echo the prompt and the implied answer for transcripts
            self.write(msg, ' ', default, "\n")
            return default
        try:
            r = self._readline(self.label(msg, 'ui.prompt'))
            if not r:
                # empty input selects the default response
                r = default
            if self.configbool('ui', 'promptecho'):
                self.write(r, "\n")
            return r
        except EOFError:
            raise util.Abort(_('response expected'))
732 733
733 734 @staticmethod
734 735 def extractchoices(prompt):
735 736 """Extract prompt message and list of choices from specified prompt.
736 737
737 738 This returns tuple "(message, choices)", and "choices" is the
738 739 list of tuple "(response character, text without &)".
739 740 """
740 741 parts = prompt.split('$$')
741 742 msg = parts[0].rstrip(' ')
742 743 choices = [p.strip(' ') for p in parts[1:]]
743 744 return (msg,
744 745 [(s[s.index('&') + 1].lower(), s.replace('&', '', 1))
745 746 for s in choices])
746 747
747 748 def promptchoice(self, prompt, default=0):
748 749 """Prompt user with a message, read response, and ensure it matches
749 750 one of the provided choices. The prompt is formatted as follows:
750 751
751 752 "would you like fries with that (Yn)? $$ &Yes $$ &No"
752 753
753 754 The index of the choice is returned. Responses are case
754 755 insensitive. If ui is not interactive, the default is
755 756 returned.
756 757 """
757 758
758 759 msg, choices = self.extractchoices(prompt)
759 760 resps = [r for r, t in choices]
760 761 while True:
761 762 r = self.prompt(msg, resps[default])
762 763 if r.lower() in resps:
763 764 return resps.index(r.lower())
764 765 self.write(_("unrecognized response\n"))
765 766
    def getpass(self, prompt=None, default=None):
        """Prompt for a password without echoing; return ``default`` when
        the ui is not interactive. Raises util.Abort on EOF."""
        if not self.interactive():
            return default
        try:
            self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
            # disable getpass() only if explicitly specified. it's still valid
            # to interact with tty even if fin is not a tty.
            if self.configbool('ui', 'nontty'):
                return self.fin.readline().rstrip('\n')
            else:
                return getpass.getpass('')
        except EOFError:
            raise util.Abort(_('response expected'))
779 780 def status(self, *msg, **opts):
780 781 '''write status message to output (if ui.quiet is False)
781 782
782 783 This adds an output label of "ui.status".
783 784 '''
784 785 if not self.quiet:
785 786 opts['label'] = opts.get('label', '') + ' ui.status'
786 787 self.write(*msg, **opts)
787 788 def warn(self, *msg, **opts):
788 789 '''write warning message to output (stderr)
789 790
790 791 This adds an output label of "ui.warning".
791 792 '''
792 793 opts['label'] = opts.get('label', '') + ' ui.warning'
793 794 self.write_err(*msg, **opts)
794 795 def note(self, *msg, **opts):
795 796 '''write note to output (if ui.verbose is True)
796 797
797 798 This adds an output label of "ui.note".
798 799 '''
799 800 if self.verbose:
800 801 opts['label'] = opts.get('label', '') + ' ui.note'
801 802 self.write(*msg, **opts)
802 803 def debug(self, *msg, **opts):
803 804 '''write debug message to output (if ui.debugflag is True)
804 805
805 806 This adds an output label of "ui.debug".
806 807 '''
807 808 if self.debugflag:
808 809 opts['label'] = opts.get('label', '') + ' ui.debug'
809 810 self.write(*msg, **opts)
    def edit(self, text, user, extra={}, editform=None):
        """Launch the user's editor on ``text`` and return the edited text.

        ``user`` is exported to the editor as HGUSER; selected keys of
        ``extra`` are exported as HGREVISION, and ``editform`` as
        HGEDITFORM.

        NOTE(review): ``extra`` is a mutable default argument; it is only
        read here, never mutated, so this is currently harmless.
        """
        (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
                                      text=True)
        try:
            f = os.fdopen(fd, "w")
            f.write(text)
            f.close()

            environ = {'HGUSER': user}
            if 'transplant_source' in extra:
                environ.update({'HGREVISION': hex(extra['transplant_source'])})
            # first matching source key wins
            for label in ('intermediate-source', 'source', 'rebase_source'):
                if label in extra:
                    environ.update({'HGREVISION': extra[label]})
                    break
            if editform:
                environ.update({'HGEDITFORM': editform})

            editor = self.geteditor()

            self.system("%s \"%s\"" % (editor, name),
                        environ=environ,
                        onerr=util.Abort, errprefix=_("edit failed"))

            f = open(name)
            t = f.read()
            f.close()
        finally:
            # always remove the temporary file, even if the editor failed
            os.unlink(name)

        return t
841 842
842 843 def system(self, cmd, environ={}, cwd=None, onerr=None, errprefix=None):
843 844 '''execute shell command with appropriate output stream. command
844 845 output will be redirected if fout is not stdout.
845 846 '''
846 847 out = self.fout
847 848 if any(s[1] for s in self._bufferstates):
848 849 out = self
849 850 return util.system(cmd, environ=environ, cwd=cwd, onerr=onerr,
850 851 errprefix=errprefix, out=out)
851 852
    def traceback(self, exc=None, force=False):
        '''print exception traceback if traceback printing enabled or forced.
        only to call in exception handler. returns true if traceback
        printed.'''
        if self.tracebackflag or force:
            if exc is None:
                # default to the exception currently being handled
                exc = sys.exc_info()
            # 'cause' carries a chained original exception, if any —
            # presumably set by Mercurial's exception plumbing; TODO confirm
            cause = getattr(exc[1], 'cause', None)

            if cause is not None:
                causetb = traceback.format_tb(cause[2])
                exctb = traceback.format_tb(exc[2])
                exconly = traceback.format_exception_only(cause[0], cause[1])

                # exclude frame where 'exc' was chained and rethrown from exctb
                self.write_err('Traceback (most recent call last):\n',
                               ''.join(exctb[:-1]),
                               ''.join(causetb),
                               ''.join(exconly))
            else:
                output = traceback.format_exception(exc[0], exc[1], exc[2])
                self.write_err(''.join(output))
        return self.tracebackflag or force
875 876
876 877 def geteditor(self):
877 878 '''return editor to use'''
878 879 if sys.platform == 'plan9':
879 880 # vi is the MIPS instruction simulator on Plan 9. We
880 881 # instead default to E to plumb commit messages to
881 882 # avoid confusion.
882 883 editor = 'E'
883 884 else:
884 885 editor = 'vi'
885 886 return (os.environ.get("HGEDITOR") or
886 887 self.config("ui", "editor") or
887 888 os.environ.get("VISUAL") or
888 889 os.environ.get("EDITOR", editor))
889 890
    @util.propertycache
    def _progbar(self):
        """setup the progbar singleton to the ui object"""
        # progress output is suppressed in quiet/debug mode, when disabled
        # by config, or when the output medium is unsuitable for it
        if (self.quiet or self.debugflag
            or self.configbool('progress', 'disable', False)
            or not progress.shouldprint(self)):
            return None
        return getprogbar(self)
898 899
899 900 def _progclear(self):
900 901 """clear progress bar output if any. use it before any output"""
901 902 if '_progbar' not in vars(self): # nothing loadef yet
902 903 return
903 904 if self._progbar is not None and self._progbar.printed:
904 905 self._progbar.clear()
905 906
906 907 def progress(self, topic, pos, item="", unit="", total=None):
907 908 '''show a progress message
908 909
909 910 With stock hg, this is simply a debug message that is hidden
910 911 by default, but with extensions or GUI tools it may be
911 912 visible. 'topic' is the current operation, 'item' is a
912 913 non-numeric marker of the current position (i.e. the currently
913 914 in-process file), 'pos' is the current numeric position (i.e.
914 915 revision, bytes, etc.), unit is a corresponding unit label,
915 916 and total is the highest expected pos.
916 917
917 918 Multiple nested topics may be active at a time.
918 919
919 920 All topics should be marked closed by setting pos to None at
920 921 termination.
921 922 '''
922 923 if self._progbar is not None:
923 924 self._progbar.progress(topic, pos, item=item, unit=unit,
924 925 total=total)
925 926 if pos is None or not self.configbool('progress', 'debug'):
926 927 return
927 928
928 929 if unit:
929 930 unit = ' ' + unit
930 931 if item:
931 932 item = ' ' + item
932 933
933 934 if total:
934 935 pct = 100.0 * pos / total
935 936 self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
936 937 % (topic, item, pos, total, unit, pct))
937 938 else:
938 939 self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
939 940
940 941 def log(self, service, *msg, **opts):
941 942 '''hook for logging facility extensions
942 943
943 944 service should be a readily-identifiable subsystem, which will
944 945 allow filtering.
945 946 message should be a newline-terminated string to log.
946 947 '''
947 948 pass
948 949
949 950 def label(self, msg, label):
950 951 '''style msg based on supplied label
951 952
952 953 Like ui.write(), this just returns msg unchanged, but extensions
953 954 and GUI tools can override it to allow styling output without
954 955 writing it.
955 956
956 957 ui.write(s, 'label') is equivalent to
957 958 ui.write(ui.label(s, 'label')).
958 959 '''
959 960 return msg
960 961
962 def develwarn(self, msg):
963 """issue a developer warning message"""
964 msg = 'devel-warn: ' + msg
965 if self.tracebackflag:
966 util.debugstacktrace(msg, 2)
967 else:
968 curframe = inspect.currentframe()
969 calframe = inspect.getouterframes(curframe, 2)
970 self.write_err('%s at: %s:%s (%s)\n' % ((msg,) + calframe[2][1:4]))
971
class paths(dict):
    """Represents a collection of paths and their configs.

    Data is initially derived from ui instances and the config files they have
    loaded.
    """
    def __init__(self, ui):
        dict.__init__(self)

        for name, loc in ui.configitems('paths'):
            # an empty location is treated as nonexistent
            if not loc:
                continue
            self[name] = path(name, rawloc=loc)

    def getpath(self, name, default=None):
        """Return a ``path`` for the specified name, falling back to a default.

        Returns the first of ``name`` or ``default`` that is present, or None
        if neither is present.
        """
        for candidate in (name, default):
            if candidate is None:
                continue
            try:
                return self[candidate]
            except KeyError:
                pass
        return None
992 1003
class path(object):
    """An individual path together with its configuration."""

    def __init__(self, name, rawloc=None):
        """Construct a path from its config options.

        ``name`` is the symbolic name of the path.
        ``rawloc`` is the raw location, as defined in the config.
        """
        # symbolic name of the path
        self.name = name
        # raw location; more intelligent handling may come in the future
        self.loc = rawloc
1005 1016
# we instantiate one globally shared progress bar to avoid
# competing progress bars when multiple UI objects get created
_progresssingleton = None

def getprogbar(ui):
    """Return the process-wide shared progress bar, creating it lazily."""
    global _progresssingleton
    if _progresssingleton is None:
        # passing 'ui' object to the singleton is fishy,
        # this is how the extension used to work but feel free to rework it.
        _progresssingleton = progress.progbar(ui)
    return _progresssingleton
General Comments 0
You need to be logged in to leave comments. Login now