scmutil: allow access to filecache descriptor on class...
Martijn Pieters
r29373:36fbd72c default
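
The two lines added below change only repofilecache.__get__: when the descriptor is looked up on the class itself, `repo` is None, and the descriptor now returns itself instead of failing on None.unfiltered(). A minimal sketch of what this enables (illustrative; assumes the mercurial package from this revision is importable):

    from mercurial import localrepo, scmutil

    # Class-level access now yields the descriptor object itself, mirroring
    # the built-in property(); previously it raised AttributeError because
    # __get__ unconditionally called repo.unfiltered() on None.
    desc = localrepo.localrepository._bookmarks
    assert isinstance(desc, scmutil.filecache)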
@@ -1,1970 +1,1972 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
64 64 class repofilecache(scmutil.filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 if repo is None:
70 return self
69 71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
70 72 def __set__(self, repo, value):
71 73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
72 74 def __delete__(self, repo):
73 75 return super(repofilecache, self).__delete__(repo.unfiltered())
74 76
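
The added None check follows the standard Python descriptor convention: __get__ receives None as the instance for class-level access and returns the descriptor itself. A self-contained sketch of that protocol (names here are illustrative, not from localrepo.py):

    class cachedvalue(object):
        """Toy non-data descriptor caching a computed value per instance."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self        # class access -> the descriptor object
            value = self.func(obj)
            obj.__dict__[self.name] = value  # later lookups hit the instance dict
            return value

    class Repo(object):
        @cachedvalue
        def data(self):
            return 'expensive result'

    assert Repo().data == 'expensive result'   # instance access: computed value
    assert isinstance(Repo.data, cachedvalue)  # class access: the descriptor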
75 77 class storecache(repofilecache):
76 78 """filecache for files in the store"""
77 79 def join(self, obj, fname):
78 80 return obj.sjoin(fname)
79 81
80 82 class unfilteredpropertycache(util.propertycache):
81 83 """propertycache that apply to unfiltered repo only"""
82 84
83 85 def __get__(self, repo, type=None):
84 86 unfi = repo.unfiltered()
85 87 if unfi is repo:
86 88 return super(unfilteredpropertycache, self).__get__(unfi)
87 89 return getattr(unfi, self.name)
88 90
89 91 class filteredpropertycache(util.propertycache):
90 92 """propertycache that must take filtering in account"""
91 93
92 94 def cachevalue(self, obj, value):
93 95 object.__setattr__(obj, self.name, value)
94 96
95 97
96 98 def hasunfilteredcache(repo, name):
97 99 """check if a repo has an unfilteredpropertycache value for <name>"""
98 100 return name in vars(repo.unfiltered())
99 101
100 102 def unfilteredmethod(orig):
101 103 """decorate method that always need to be run on unfiltered version"""
102 104 def wrapper(repo, *args, **kwargs):
103 105 return orig(repo.unfiltered(), *args, **kwargs)
104 106 return wrapper
105 107
106 108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
107 109 'unbundle'))
108 110 legacycaps = moderncaps.union(set(['changegroupsubset']))
109 111
110 112 class localpeer(peer.peerrepository):
111 113 '''peer for a local repo; reflects only the most recent API'''
112 114
113 115 def __init__(self, repo, caps=moderncaps):
114 116 peer.peerrepository.__init__(self)
115 117 self._repo = repo.filtered('served')
116 118 self.ui = repo.ui
117 119 self._caps = repo._restrictcapabilities(caps)
118 120 self.requirements = repo.requirements
119 121 self.supportedformats = repo.supportedformats
120 122
121 123 def close(self):
122 124 self._repo.close()
123 125
124 126 def _capabilities(self):
125 127 return self._caps
126 128
127 129 def local(self):
128 130 return self._repo
129 131
130 132 def canpush(self):
131 133 return True
132 134
133 135 def url(self):
134 136 return self._repo.url()
135 137
136 138 def lookup(self, key):
137 139 return self._repo.lookup(key)
138 140
139 141 def branchmap(self):
140 142 return self._repo.branchmap()
141 143
142 144 def heads(self):
143 145 return self._repo.heads()
144 146
145 147 def known(self, nodes):
146 148 return self._repo.known(nodes)
147 149
148 150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
149 151 **kwargs):
150 152 cg = exchange.getbundle(self._repo, source, heads=heads,
151 153 common=common, bundlecaps=bundlecaps, **kwargs)
152 154 if bundlecaps is not None and 'HG20' in bundlecaps:
153 155 # When requesting a bundle2, getbundle returns a stream to make the
154 156 # wire-level function happier. We need to build a proper object
155 157 # from it in the local peer.
156 158 cg = bundle2.getunbundler(self.ui, cg)
157 159 return cg
158 160
159 161 # TODO We might want to move the next two calls into legacypeer and add
160 162 # unbundle instead.
161 163
162 164 def unbundle(self, cg, heads, url):
163 165 """apply a bundle on a repo
164 166
165 167 This function handles the repo locking itself."""
166 168 try:
167 169 try:
168 170 cg = exchange.readbundle(self.ui, cg, None)
169 171 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
170 172 if util.safehasattr(ret, 'getchunks'):
171 173 # This is a bundle20 object, turn it into an unbundler.
172 174 # This little dance should be dropped eventually when the
173 175 # API is finally improved.
174 176 stream = util.chunkbuffer(ret.getchunks())
175 177 ret = bundle2.getunbundler(self.ui, stream)
176 178 return ret
177 179 except Exception as exc:
178 180 # If the exception contains output salvaged from a bundle2
179 181 # reply, we need to make sure it is printed before continuing
180 182 # to fail. So we build a bundle2 with such output and consume
181 183 # it directly.
182 184 #
183 185 # This is not very elegant but allows a "simple" solution for
184 186 # issue4594
185 187 output = getattr(exc, '_bundle2salvagedoutput', ())
186 188 if output:
187 189 bundler = bundle2.bundle20(self._repo.ui)
188 190 for out in output:
189 191 bundler.addpart(out)
190 192 stream = util.chunkbuffer(bundler.getchunks())
191 193 b = bundle2.getunbundler(self.ui, stream)
192 194 bundle2.processbundle(self._repo, b)
193 195 raise
194 196 except error.PushRaced as exc:
195 197 raise error.ResponseError(_('push failed:'), str(exc))
196 198
197 199 def lock(self):
198 200 return self._repo.lock()
199 201
200 202 def addchangegroup(self, cg, source, url):
201 203 return cg.apply(self._repo, source, url)
202 204
203 205 def pushkey(self, namespace, key, old, new):
204 206 return self._repo.pushkey(namespace, key, old, new)
205 207
206 208 def listkeys(self, namespace):
207 209 return self._repo.listkeys(namespace)
208 210
209 211 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 212 '''used to test argument passing over the wire'''
211 213 return "%s %s %s %s %s" % (one, two, three, four, five)
212 214
213 215 class locallegacypeer(localpeer):
214 216 '''peer extension which implements legacy methods too; used for tests with
215 217 restricted capabilities'''
216 218
217 219 def __init__(self, repo):
218 220 localpeer.__init__(self, repo, caps=legacycaps)
219 221
220 222 def branches(self, nodes):
221 223 return self._repo.branches(nodes)
222 224
223 225 def between(self, pairs):
224 226 return self._repo.between(pairs)
225 227
226 228 def changegroup(self, basenodes, source):
227 229 return changegroup.changegroup(self._repo, basenodes, source)
228 230
229 231 def changegroupsubset(self, bases, heads, source):
230 232 return changegroup.changegroupsubset(self._repo, bases, heads, source)
231 233
232 234 class localrepository(object):
233 235
234 236 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
235 237 'manifestv2'))
236 238 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
237 239 'dotencode'))
238 240 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
239 241 filtername = None
240 242
241 243 # a list of (ui, featureset) functions.
242 244 # only functions defined in module of enabled extensions are invoked
243 245 featuresetupfuncs = set()
244 246
245 247 def __init__(self, baseui, path=None, create=False):
246 248 self.requirements = set()
247 249 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
248 250 self.wopener = self.wvfs
249 251 self.root = self.wvfs.base
250 252 self.path = self.wvfs.join(".hg")
251 253 self.origroot = path
252 254 self.auditor = pathutil.pathauditor(self.root, self._checknested)
253 255 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
254 256 realfs=False)
255 257 self.vfs = scmutil.vfs(self.path)
256 258 self.opener = self.vfs
257 259 self.baseui = baseui
258 260 self.ui = baseui.copy()
259 261 self.ui.copy = baseui.copy # prevent copying repo configuration
260 262 # A list of callbacks to shape the phase if no data were found.
261 263 # Callbacks are in the form: func(repo, roots) --> processed root.
262 264 # This list is to be filled by extensions during repo setup
263 265 self._phasedefaults = []
264 266 try:
265 267 self.ui.readconfig(self.join("hgrc"), self.root)
266 268 extensions.loadall(self.ui)
267 269 except IOError:
268 270 pass
269 271
270 272 if self.featuresetupfuncs:
271 273 self.supported = set(self._basesupported) # use private copy
272 274 extmods = set(m.__name__ for n, m
273 275 in extensions.extensions(self.ui))
274 276 for setupfunc in self.featuresetupfuncs:
275 277 if setupfunc.__module__ in extmods:
276 278 setupfunc(self.ui, self.supported)
277 279 else:
278 280 self.supported = self._basesupported
279 281
280 282 if not self.vfs.isdir():
281 283 if create:
282 284 self.requirements = newreporequirements(self)
283 285
284 286 if not self.wvfs.exists():
285 287 self.wvfs.makedirs()
286 288 self.vfs.makedir(notindexed=True)
287 289
288 290 if 'store' in self.requirements:
289 291 self.vfs.mkdir("store")
290 292
291 293 # create an invalid changelog
292 294 self.vfs.append(
293 295 "00changelog.i",
294 296 '\0\0\0\2' # represents revlogv2
295 297 ' dummy changelog to prevent using the old repo layout'
296 298 )
297 299 else:
298 300 raise error.RepoError(_("repository %s not found") % path)
299 301 elif create:
300 302 raise error.RepoError(_("repository %s already exists") % path)
301 303 else:
302 304 try:
303 305 self.requirements = scmutil.readrequires(
304 306 self.vfs, self.supported)
305 307 except IOError as inst:
306 308 if inst.errno != errno.ENOENT:
307 309 raise
308 310
309 311 self.sharedpath = self.path
310 312 try:
311 313 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
312 314 realpath=True)
313 315 s = vfs.base
314 316 if not vfs.exists():
315 317 raise error.RepoError(
316 318 _('.hg/sharedpath points to nonexistent directory %s') % s)
317 319 self.sharedpath = s
318 320 except IOError as inst:
319 321 if inst.errno != errno.ENOENT:
320 322 raise
321 323
322 324 self.store = store.store(
323 325 self.requirements, self.sharedpath, scmutil.vfs)
324 326 self.spath = self.store.path
325 327 self.svfs = self.store.vfs
326 328 self.sjoin = self.store.join
327 329 self.vfs.createmode = self.store.createmode
328 330 self._applyopenerreqs()
329 331 if create:
330 332 self._writerequirements()
331 333
332 334 self._dirstatevalidatewarned = False
333 335
334 336 self._branchcaches = {}
335 337 self._revbranchcache = None
336 338 self.filterpats = {}
337 339 self._datafilters = {}
338 340 self._transref = self._lockref = self._wlockref = None
339 341
340 342 # A cache for various files under .hg/ that tracks file changes,
341 343 # (used by the filecache decorator)
342 344 #
343 345 # Maps a property name to its util.filecacheentry
344 346 self._filecache = {}
345 347
346 348 # hold sets of revisions to be filtered
347 349 # should be cleared when something might have changed the filter value:
348 350 # - new changesets,
349 351 # - phase change,
350 352 # - new obsolescence marker,
351 353 # - working directory parent change,
352 354 # - bookmark changes
353 355 self.filteredrevcache = {}
354 356
355 357 # generic mapping between names and nodes
356 358 self.names = namespaces.namespaces()
357 359
358 360 def close(self):
359 361 self._writecaches()
360 362
361 363 def _writecaches(self):
362 364 if self._revbranchcache:
363 365 self._revbranchcache.write()
364 366
365 367 def _restrictcapabilities(self, caps):
366 368 if self.ui.configbool('experimental', 'bundle2-advertise', True):
367 369 caps = set(caps)
368 370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
369 371 caps.add('bundle2=' + urlreq.quote(capsblob))
370 372 return caps
371 373
372 374 def _applyopenerreqs(self):
373 375 self.svfs.options = dict((r, 1) for r in self.requirements
374 376 if r in self.openerreqs)
375 377 # experimental config: format.chunkcachesize
376 378 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
377 379 if chunkcachesize is not None:
378 380 self.svfs.options['chunkcachesize'] = chunkcachesize
379 381 # experimental config: format.maxchainlen
380 382 maxchainlen = self.ui.configint('format', 'maxchainlen')
381 383 if maxchainlen is not None:
382 384 self.svfs.options['maxchainlen'] = maxchainlen
383 385 # experimental config: format.manifestcachesize
384 386 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
385 387 if manifestcachesize is not None:
386 388 self.svfs.options['manifestcachesize'] = manifestcachesize
387 389 # experimental config: format.aggressivemergedeltas
388 390 aggressivemergedeltas = self.ui.configbool('format',
389 391 'aggressivemergedeltas', False)
390 392 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
391 393 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
392 394
393 395 def _writerequirements(self):
394 396 scmutil.writerequires(self.vfs, self.requirements)
395 397
396 398 def _checknested(self, path):
397 399 """Determine if path is a legal nested repository."""
398 400 if not path.startswith(self.root):
399 401 return False
400 402 subpath = path[len(self.root) + 1:]
401 403 normsubpath = util.pconvert(subpath)
402 404
403 405 # XXX: Checking against the current working copy is wrong in
404 406 # the sense that it can reject things like
405 407 #
406 408 # $ hg cat -r 10 sub/x.txt
407 409 #
408 410 # if sub/ is no longer a subrepository in the working copy
409 411 # parent revision.
410 412 #
411 413 # However, it can of course also allow things that would have
412 414 # been rejected before, such as the above cat command if sub/
413 415 # is a subrepository now, but was a normal directory before.
414 416 # The old path auditor would have rejected it by mistake since it
415 417 # panics when it sees sub/.hg/.
416 418 #
417 419 # All in all, checking against the working copy seems sensible
418 420 # since we want to prevent access to nested repositories on
419 421 # the filesystem *now*.
420 422 ctx = self[None]
421 423 parts = util.splitpath(subpath)
422 424 while parts:
423 425 prefix = '/'.join(parts)
424 426 if prefix in ctx.substate:
425 427 if prefix == normsubpath:
426 428 return True
427 429 else:
428 430 sub = ctx.sub(prefix)
429 431 return sub.checknested(subpath[len(prefix) + 1:])
430 432 else:
431 433 parts.pop()
432 434 return False
433 435
434 436 def peer(self):
435 437 return localpeer(self) # not cached to avoid reference cycle
436 438
437 439 def unfiltered(self):
438 440 """Return unfiltered version of the repository
439 441
440 442 Intended to be overwritten by filtered repo."""
441 443 return self
442 444
443 445 def filtered(self, name):
444 446 """Return a filtered version of a repository"""
445 447 # build a new class with the mixin and the current class
446 448 # (possibly subclass of the repo)
447 449 class proxycls(repoview.repoview, self.unfiltered().__class__):
448 450 pass
449 451 return proxycls(self, name)
450 452
451 453 @repofilecache('bookmarks', 'bookmarks.current')
452 454 def _bookmarks(self):
453 455 return bookmarks.bmstore(self)
454 456
455 457 @property
456 458 def _activebookmark(self):
457 459 return self._bookmarks.active
458 460
459 461 def bookmarkheads(self, bookmark):
460 462 name = bookmark.split('@', 1)[0]
461 463 heads = []
462 464 for mark, n in self._bookmarks.iteritems():
463 465 if mark.split('@', 1)[0] == name:
464 466 heads.append(n)
465 467 return heads
466 468
467 469 # _phaserevs and _phasesets depend on changelog. what we need is to
468 470 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
469 471 # can't be easily expressed in the filecache mechanism.
470 472 @storecache('phaseroots', '00changelog.i')
471 473 def _phasecache(self):
472 474 return phases.phasecache(self, self._phasedefaults)
473 475
474 476 @storecache('obsstore')
475 477 def obsstore(self):
476 478 # read default format for new obsstore.
477 479 # developer config: format.obsstore-version
478 480 defaultformat = self.ui.configint('format', 'obsstore-version', None)
479 481 # rely on obsstore class default when possible.
480 482 kwargs = {}
481 483 if defaultformat is not None:
482 484 kwargs['defaultformat'] = defaultformat
483 485 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
484 486 store = obsolete.obsstore(self.svfs, readonly=readonly,
485 487 **kwargs)
486 488 if store and readonly:
487 489 self.ui.warn(
488 490 _('obsolete feature not enabled but %i markers found!\n')
489 491 % len(list(store)))
490 492 return store
491 493
492 494 @storecache('00changelog.i')
493 495 def changelog(self):
494 496 c = changelog.changelog(self.svfs)
495 497 if 'HG_PENDING' in os.environ:
496 498 p = os.environ['HG_PENDING']
497 499 if p.startswith(self.root):
498 500 c.readpending('00changelog.i.a')
499 501 return c
500 502
501 503 @storecache('00manifest.i')
502 504 def manifest(self):
503 505 return manifest.manifest(self.svfs)
504 506
505 507 def dirlog(self, dir):
506 508 return self.manifest.dirlog(dir)
507 509
508 510 @repofilecache('dirstate')
509 511 def dirstate(self):
510 512 return dirstate.dirstate(self.vfs, self.ui, self.root,
511 513 self._dirstatevalidate)
512 514
513 515 def _dirstatevalidate(self, node):
514 516 try:
515 517 self.changelog.rev(node)
516 518 return node
517 519 except error.LookupError:
518 520 if not self._dirstatevalidatewarned:
519 521 self._dirstatevalidatewarned = True
520 522 self.ui.warn(_("warning: ignoring unknown"
521 523 " working parent %s!\n") % short(node))
522 524 return nullid
523 525
524 526 def __getitem__(self, changeid):
525 527 if changeid is None or changeid == wdirrev:
526 528 return context.workingctx(self)
527 529 if isinstance(changeid, slice):
528 530 return [context.changectx(self, i)
529 531 for i in xrange(*changeid.indices(len(self)))
530 532 if i not in self.changelog.filteredrevs]
531 533 return context.changectx(self, changeid)
532 534
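
As the method above shows, indexing a repository accepts None (or wdirrev) for the working context, a slice, or any other changeid. A hedged usage sketch, assuming `repo` is a localrepository instance:

    wctx = repo[None]    # working directory context
    tipctx = repo['tip'] # changectx from a symbolic name
    last3 = repo[-3:]    # slice -> list of changectx, skipping filtered revs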
533 535 def __contains__(self, changeid):
534 536 try:
535 537 self[changeid]
536 538 return True
537 539 except error.RepoLookupError:
538 540 return False
539 541
540 542 def __nonzero__(self):
541 543 return True
542 544
543 545 def __len__(self):
544 546 return len(self.changelog)
545 547
546 548 def __iter__(self):
547 549 return iter(self.changelog)
548 550
549 551 def revs(self, expr, *args):
550 552 '''Find revisions matching a revset.
551 553
552 554 The revset is specified as a string ``expr`` that may contain
553 555 %-formatting to escape certain types. See ``revset.formatspec``.
554 556
555 557 Return a revset.abstractsmartset, which is a list-like interface
556 558 that contains integer revisions.
557 559 '''
558 560 expr = revset.formatspec(expr, *args)
559 561 m = revset.match(None, expr)
560 562 return m(self)
561 563
562 564 def set(self, expr, *args):
563 565 '''Find revisions matching a revset and emit changectx instances.
564 566
565 567 This is a convenience wrapper around ``revs()`` that iterates the
566 568 result and is a generator of changectx instances.
567 569 '''
568 570 for r in self.revs(expr, *args):
569 571 yield self[r]
570 572
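
Hedged examples of the two revset helpers documented above; the %-placeholders are escaped by revset.formatspec, and the repository and revset strings are illustrative:

    # revs() returns a lazy smartset of integer revisions
    for rev in repo.revs('branch(%s) and not merge()', 'default'):
        print(rev)

    # set() wraps revs() and yields changectx objects
    for ctx in repo.set('%d::%d', 0, 5):
        print(ctx.hex())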
571 573 def url(self):
572 574 return 'file:' + self.root
573 575
574 576 def hook(self, name, throw=False, **args):
575 577 """Call a hook, passing this repo instance.
576 578
577 579 This is a convenience method to aid invoking hooks. Extensions likely
578 580 won't call this unless they have registered a custom hook or are
579 581 replacing code that is expected to call a hook.
580 582 """
581 583 return hook.hook(self.ui, self, name, throw, **args)
582 584
583 585 @unfilteredmethod
584 586 def _tag(self, names, node, message, local, user, date, extra=None,
585 587 editor=False):
586 588 if isinstance(names, str):
587 589 names = (names,)
588 590
589 591 branches = self.branchmap()
590 592 for name in names:
591 593 self.hook('pretag', throw=True, node=hex(node), tag=name,
592 594 local=local)
593 595 if name in branches:
594 596 self.ui.warn(_("warning: tag %s conflicts with existing"
595 597 " branch name\n") % name)
596 598
597 599 def writetags(fp, names, munge, prevtags):
598 600 fp.seek(0, 2)
599 601 if prevtags and prevtags[-1] != '\n':
600 602 fp.write('\n')
601 603 for name in names:
602 604 if munge:
603 605 m = munge(name)
604 606 else:
605 607 m = name
606 608
607 609 if (self._tagscache.tagtypes and
608 610 name in self._tagscache.tagtypes):
609 611 old = self.tags().get(name, nullid)
610 612 fp.write('%s %s\n' % (hex(old), m))
611 613 fp.write('%s %s\n' % (hex(node), m))
612 614 fp.close()
613 615
614 616 prevtags = ''
615 617 if local:
616 618 try:
617 619 fp = self.vfs('localtags', 'r+')
618 620 except IOError:
619 621 fp = self.vfs('localtags', 'a')
620 622 else:
621 623 prevtags = fp.read()
622 624
623 625 # local tags are stored in the current charset
624 626 writetags(fp, names, None, prevtags)
625 627 for name in names:
626 628 self.hook('tag', node=hex(node), tag=name, local=local)
627 629 return
628 630
629 631 try:
630 632 fp = self.wfile('.hgtags', 'rb+')
631 633 except IOError as e:
632 634 if e.errno != errno.ENOENT:
633 635 raise
634 636 fp = self.wfile('.hgtags', 'ab')
635 637 else:
636 638 prevtags = fp.read()
637 639
638 640 # committed tags are stored in UTF-8
639 641 writetags(fp, names, encoding.fromlocal, prevtags)
640 642
641 643 fp.close()
642 644
643 645 self.invalidatecaches()
644 646
645 647 if '.hgtags' not in self.dirstate:
646 648 self[None].add(['.hgtags'])
647 649
648 650 m = matchmod.exact(self.root, '', ['.hgtags'])
649 651 tagnode = self.commit(message, user, date, extra=extra, match=m,
650 652 editor=editor)
651 653
652 654 for name in names:
653 655 self.hook('tag', node=hex(node), tag=name, local=local)
654 656
655 657 return tagnode
656 658
657 659 def tag(self, names, node, message, local, user, date, editor=False):
658 660 '''tag a revision with one or more symbolic names.
659 661
660 662 names is a list of strings or, when adding a single tag, names may be a
661 663 string.
662 664
663 665 if local is True, the tags are stored in a per-repository file.
664 666 otherwise, they are stored in the .hgtags file, and a new
665 667 changeset is committed with the change.
666 668
667 669 keyword arguments:
668 670
669 671 local: whether to store tags in non-version-controlled file
670 672 (default False)
671 673
672 674 message: commit message to use if committing
673 675
674 676 user: name of user to use if committing
675 677
676 678 date: date tuple to use if committing'''
677 679
678 680 if not local:
679 681 m = matchmod.exact(self.root, '', ['.hgtags'])
680 682 if any(self.status(match=m, unknown=True, ignored=True)):
681 683 raise error.Abort(_('working copy of .hgtags is changed'),
682 684 hint=_('please commit .hgtags manually'))
683 685
684 686 self.tags() # instantiate the cache
685 687 self._tag(names, node, message, local, user, date, editor=editor)
686 688
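
A sketch of the tagging API just described, with an illustrative tag name, user, and message:

    from mercurial.node import short

    node = repo['tip'].node()
    # committed (global) tag; pass local=True to write .hg/localtags instead
    repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % short(node),
             local=False, user='alice', date=None)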
687 689 @filteredpropertycache
688 690 def _tagscache(self):
689 691 '''Returns a tagscache object that contains various tags related
690 692 caches.'''
691 693
692 694 # This simplifies its cache management by having one decorated
693 695 # function (this one) and the rest simply fetch things from it.
694 696 class tagscache(object):
695 697 def __init__(self):
696 698 # These two define the set of tags for this repository. tags
697 699 # maps tag name to node; tagtypes maps tag name to 'global' or
698 700 # 'local'. (Global tags are defined by .hgtags across all
699 701 # heads, and local tags are defined in .hg/localtags.)
700 702 # They constitute the in-memory cache of tags.
701 703 self.tags = self.tagtypes = None
702 704
703 705 self.nodetagscache = self.tagslist = None
704 706
705 707 cache = tagscache()
706 708 cache.tags, cache.tagtypes = self._findtags()
707 709
708 710 return cache
709 711
710 712 def tags(self):
711 713 '''return a mapping of tag to node'''
712 714 t = {}
713 715 if self.changelog.filteredrevs:
714 716 tags, tt = self._findtags()
715 717 else:
716 718 tags = self._tagscache.tags
717 719 for k, v in tags.iteritems():
718 720 try:
719 721 # ignore tags to unknown nodes
720 722 self.changelog.rev(v)
721 723 t[k] = v
722 724 except (error.LookupError, ValueError):
723 725 pass
724 726 return t
725 727
726 728 def _findtags(self):
727 729 '''Do the hard work of finding tags. Return a pair of dicts
728 730 (tags, tagtypes) where tags maps tag name to node, and tagtypes
729 731 maps tag name to a string like \'global\' or \'local\'.
730 732 Subclasses or extensions are free to add their own tags, but
731 733 should be aware that the returned dicts will be retained for the
732 734 duration of the localrepo object.'''
733 735
734 736 # XXX what tagtype should subclasses/extensions use? Currently
735 737 # mq and bookmarks add tags, but do not set the tagtype at all.
736 738 # Should each extension invent its own tag type? Should there
737 739 # be one tagtype for all such "virtual" tags? Or is the status
738 740 # quo fine?
739 741
740 742 alltags = {} # map tag name to (node, hist)
741 743 tagtypes = {}
742 744
743 745 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
744 746 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
745 747
746 748 # Build the return dicts. Have to re-encode tag names because
747 749 # the tags module always uses UTF-8 (in order not to lose info
748 750 # writing to the cache), but the rest of Mercurial wants them in
749 751 # local encoding.
750 752 tags = {}
751 753 for (name, (node, hist)) in alltags.iteritems():
752 754 if node != nullid:
753 755 tags[encoding.tolocal(name)] = node
754 756 tags['tip'] = self.changelog.tip()
755 757 tagtypes = dict([(encoding.tolocal(name), value)
756 758 for (name, value) in tagtypes.iteritems()])
757 759 return (tags, tagtypes)
758 760
759 761 def tagtype(self, tagname):
760 762 '''
761 763 return the type of the given tag. result can be:
762 764
763 765 'local' : a local tag
764 766 'global' : a global tag
765 767 None : tag does not exist
766 768 '''
767 769
768 770 return self._tagscache.tagtypes.get(tagname)
769 771
770 772 def tagslist(self):
771 773 '''return a list of tags ordered by revision'''
772 774 if not self._tagscache.tagslist:
773 775 l = []
774 776 for t, n in self.tags().iteritems():
775 777 l.append((self.changelog.rev(n), t, n))
776 778 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
777 779
778 780 return self._tagscache.tagslist
779 781
780 782 def nodetags(self, node):
781 783 '''return the tags associated with a node'''
782 784 if not self._tagscache.nodetagscache:
783 785 nodetagscache = {}
784 786 for t, n in self._tagscache.tags.iteritems():
785 787 nodetagscache.setdefault(n, []).append(t)
786 788 for tags in nodetagscache.itervalues():
787 789 tags.sort()
788 790 self._tagscache.nodetagscache = nodetagscache
789 791 return self._tagscache.nodetagscache.get(node, [])
790 792
791 793 def nodebookmarks(self, node):
792 794 """return the list of bookmarks pointing to the specified node"""
793 795 marks = []
794 796 for bookmark, n in self._bookmarks.iteritems():
795 797 if n == node:
796 798 marks.append(bookmark)
797 799 return sorted(marks)
798 800
799 801 def branchmap(self):
800 802 '''returns a dictionary {branch: [branchheads]} with branchheads
801 803 ordered by increasing revision number'''
802 804 branchmap.updatecache(self)
803 805 return self._branchcaches[self.filtername]
804 806
805 807 @unfilteredmethod
806 808 def revbranchcache(self):
807 809 if not self._revbranchcache:
808 810 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
809 811 return self._revbranchcache
810 812
811 813 def branchtip(self, branch, ignoremissing=False):
812 814 '''return the tip node for a given branch
813 815
814 816 If ignoremissing is True, then this method will not raise an error.
815 817 This is helpful for callers that only expect None for a missing branch
816 818 (e.g. namespace).
817 819
818 820 '''
819 821 try:
820 822 return self.branchmap().branchtip(branch)
821 823 except KeyError:
822 824 if not ignoremissing:
823 825 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
824 826 else:
825 827 pass
826 828
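
Illustrative lookups against the branch APIs above (assumes a 'default' branch exists):

    heads = repo.branchmap()['default']   # branch head nodes, ascending revs
    tip = repo.branchtip('default')       # raises RepoLookupError if unknown
    maybe = repo.branchtip('no-such-branch', ignoremissing=True)  # None instead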
827 829 def lookup(self, key):
828 830 return self[key].node()
829 831
830 832 def lookupbranch(self, key, remote=None):
831 833 repo = remote or self
832 834 if key in repo.branchmap():
833 835 return key
834 836
835 837 repo = (remote and remote.local()) and remote or self
836 838 return repo[key].branch()
837 839
838 840 def known(self, nodes):
839 841 cl = self.changelog
840 842 nm = cl.nodemap
841 843 filtered = cl.filteredrevs
842 844 result = []
843 845 for n in nodes:
844 846 r = nm.get(n)
845 847 resp = not (r is None or r in filtered)
846 848 result.append(resp)
847 849 return result
848 850
849 851 def local(self):
850 852 return self
851 853
852 854 def publishing(self):
853 855 # it's safe (and desirable) to trust the publish flag unconditionally
854 856 # so that we don't finalize changes shared between users via ssh or nfs
855 857 return self.ui.configbool('phases', 'publish', True, untrusted=True)
856 858
857 859 def cancopy(self):
858 860 # so statichttprepo's override of local() works
859 861 if not self.local():
860 862 return False
861 863 if not self.publishing():
862 864 return True
863 865 # if publishing we can't copy if there is filtered content
864 866 return not self.filtered('visible').changelog.filteredrevs
865 867
866 868 def shared(self):
867 869 '''the type of shared repository (None if not shared)'''
868 870 if self.sharedpath != self.path:
869 871 return 'store'
870 872 return None
871 873
872 874 def join(self, f, *insidef):
873 875 return self.vfs.join(os.path.join(f, *insidef))
874 876
875 877 def wjoin(self, f, *insidef):
876 878 return self.vfs.reljoin(self.root, f, *insidef)
877 879
878 880 def file(self, f):
879 881 if f[0] == '/':
880 882 f = f[1:]
881 883 return filelog.filelog(self.svfs, f)
882 884
883 885 def changectx(self, changeid):
884 886 return self[changeid]
885 887
886 888 def setparents(self, p1, p2=nullid):
887 889 self.dirstate.beginparentchange()
888 890 copies = self.dirstate.setparents(p1, p2)
889 891 pctx = self[p1]
890 892 if copies:
891 893 # Adjust copy records; the dirstate cannot do it, as it
892 894 # requires access to the parent manifests. Preserve them
893 895 # only for entries added to the first parent.
894 896 for f in copies:
895 897 if f not in pctx and copies[f] in pctx:
896 898 self.dirstate.copy(copies[f], f)
897 899 if p2 == nullid:
898 900 for f, s in sorted(self.dirstate.copies().items()):
899 901 if f not in pctx and s not in pctx:
900 902 self.dirstate.copy(None, f)
901 903 self.dirstate.endparentchange()
902 904
903 905 def filectx(self, path, changeid=None, fileid=None):
904 906 """changeid can be a changeset revision, node, or tag.
905 907 fileid can be a file revision or node."""
906 908 return context.filectx(self, path, changeid, fileid)
907 909
908 910 def getcwd(self):
909 911 return self.dirstate.getcwd()
910 912
911 913 def pathto(self, f, cwd=None):
912 914 return self.dirstate.pathto(f, cwd)
913 915
914 916 def wfile(self, f, mode='r'):
915 917 return self.wvfs(f, mode)
916 918
917 919 def _link(self, f):
918 920 return self.wvfs.islink(f)
919 921
920 922 def _loadfilter(self, filter):
921 923 if filter not in self.filterpats:
922 924 l = []
923 925 for pat, cmd in self.ui.configitems(filter):
924 926 if cmd == '!':
925 927 continue
926 928 mf = matchmod.match(self.root, '', [pat])
927 929 fn = None
928 930 params = cmd
929 931 for name, filterfn in self._datafilters.iteritems():
930 932 if cmd.startswith(name):
931 933 fn = filterfn
932 934 params = cmd[len(name):].lstrip()
933 935 break
934 936 if not fn:
935 937 fn = lambda s, c, **kwargs: util.filter(s, c)
936 938 # Wrap old filters not supporting keyword arguments
937 939 if not inspect.getargspec(fn)[2]:
938 940 oldfn = fn
939 941 fn = lambda s, c, **kwargs: oldfn(s, c)
940 942 l.append((mf, fn, params))
941 943 self.filterpats[filter] = l
942 944 return self.filterpats[filter]
943 945
944 946 def _filter(self, filterpats, filename, data):
945 947 for mf, fn, cmd in filterpats:
946 948 if mf(filename):
947 949 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
948 950 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
949 951 break
950 952
951 953 return data
952 954
953 955 @unfilteredpropertycache
954 956 def _encodefilterpats(self):
955 957 return self._loadfilter('encode')
956 958
957 959 @unfilteredpropertycache
958 960 def _decodefilterpats(self):
959 961 return self._loadfilter('decode')
960 962
961 963 def adddatafilter(self, name, filter):
962 964 self._datafilters[name] = filter
963 965
964 966 def wread(self, filename):
965 967 if self._link(filename):
966 968 data = self.wvfs.readlink(filename)
967 969 else:
968 970 data = self.wvfs.read(filename)
969 971 return self._filter(self._encodefilterpats, filename, data)
970 972
971 973 def wwrite(self, filename, data, flags, backgroundclose=False):
972 974 """write ``data`` into ``filename`` in the working directory
973 975
974 976 This returns the length of the written (possibly decoded) data.
975 977 """
976 978 data = self._filter(self._decodefilterpats, filename, data)
977 979 if 'l' in flags:
978 980 self.wvfs.symlink(data, filename)
979 981 else:
980 982 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
981 983 if 'x' in flags:
982 984 self.wvfs.setflags(filename, False, True)
983 985 return len(data)
984 986
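
The flags argument of wwrite() mirrors manifest flags: 'l' writes a symlink whose target is the data, and 'x' sets the executable bit. Illustrative calls:

    repo.wwrite('bin/run.sh', '#!/bin/sh\necho hi\n', 'x')  # executable file
    repo.wwrite('latest', 'bin/run.sh', 'l')                # symlink to the data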
985 987 def wwritedata(self, filename, data):
986 988 return self._filter(self._decodefilterpats, filename, data)
987 989
988 990 def currenttransaction(self):
989 991 """return the current transaction or None if non exists"""
990 992 if self._transref:
991 993 tr = self._transref()
992 994 else:
993 995 tr = None
994 996
995 997 if tr and tr.running():
996 998 return tr
997 999 return None
998 1000
999 1001 def transaction(self, desc, report=None):
1000 1002 if (self.ui.configbool('devel', 'all-warnings')
1001 1003 or self.ui.configbool('devel', 'check-locks')):
1002 1004 l = self._lockref and self._lockref()
1003 1005 if l is None or not l.held:
1004 1006 raise RuntimeError('programming error: transaction requires '
1005 1007 'locking')
1006 1008 tr = self.currenttransaction()
1007 1009 if tr is not None:
1008 1010 return tr.nest()
1009 1011
1010 1012 # abort here if the journal already exists
1011 1013 if self.svfs.exists("journal"):
1012 1014 raise error.RepoError(
1013 1015 _("abandoned transaction found"),
1014 1016 hint=_("run 'hg recover' to clean up transaction"))
1015 1017
1016 1018 idbase = "%.40f#%f" % (random.random(), time.time())
1017 1019 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1018 1020 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1019 1021
1020 1022 self._writejournal(desc)
1021 1023 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1022 1024 if report:
1023 1025 rp = report
1024 1026 else:
1025 1027 rp = self.ui.warn
1026 1028 vfsmap = {'plain': self.vfs} # root of .hg/
1027 1029 # we must avoid cyclic reference between repo and transaction.
1028 1030 reporef = weakref.ref(self)
1029 1031 def validate(tr):
1030 1032 """will run pre-closing hooks"""
1031 1033 reporef().hook('pretxnclose', throw=True,
1032 1034 txnname=desc, **tr.hookargs)
1033 1035 def releasefn(tr, success):
1034 1036 repo = reporef()
1035 1037 if success:
1036 1038 # this should be explicitly invoked here, because
1037 1039 # in-memory changes aren't written out when closing the
1038 1040 # transaction if tr.addfilegenerator (via
1039 1041 # dirstate.write or so) wasn't invoked while the
1040 1042 # transaction was running
1041 1043 repo.dirstate.write(None)
1042 1044 else:
1043 1045 # discard all changes (including ones already written
1044 1046 # out) in this transaction
1045 1047 repo.dirstate.restorebackup(None, prefix='journal.')
1046 1048
1047 1049 repo.invalidate(clearfilecache=True)
1048 1050
1049 1051 tr = transaction.transaction(rp, self.svfs, vfsmap,
1050 1052 "journal",
1051 1053 "undo",
1052 1054 aftertrans(renames),
1053 1055 self.store.createmode,
1054 1056 validator=validate,
1055 1057 releasefn=releasefn)
1056 1058
1057 1059 tr.hookargs['txnid'] = txnid
1058 1060 # note: writing the fncache only during finalize means that the file is
1059 1061 # outdated when running hooks. As fncache is used for streaming clone,
1060 1062 # this is not expected to break anything that happens during the hooks.
1061 1063 tr.addfinalize('flush-fncache', self.store.write)
1062 1064 def txnclosehook(tr2):
1063 1065 """To be run if transaction is successful, will schedule a hook run
1064 1066 """
1065 1067 # Don't reference tr2 in hook() so we don't hold a reference.
1066 1068 # This reduces memory consumption when there are multiple
1067 1069 # transactions per lock. This can likely go away if issue5045
1068 1070 # fixes the function accumulation.
1069 1071 hookargs = tr2.hookargs
1070 1072
1071 1073 def hook():
1072 1074 reporef().hook('txnclose', throw=False, txnname=desc,
1073 1075 **hookargs)
1074 1076 reporef()._afterlock(hook)
1075 1077 tr.addfinalize('txnclose-hook', txnclosehook)
1076 1078 def txnaborthook(tr2):
1077 1079 """To be run if transaction is aborted
1078 1080 """
1079 1081 reporef().hook('txnabort', throw=False, txnname=desc,
1080 1082 **tr2.hookargs)
1081 1083 tr.addabort('txnabort-hook', txnaborthook)
1082 1084 # avoid eager cache invalidation. in-memory data should be identical
1083 1085 # to stored data if transaction has no error.
1084 1086 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1085 1087 self._transref = weakref.ref(tr)
1086 1088 return tr
1087 1089
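
A sketch of opening a transaction under the store lock, as the devel checks in transaction() above require; tr.close()/tr.release() are the transaction API of this era:

    with repo.lock():
        tr = repo.transaction('example')
        try:
            # ... write store files through tr ...
            tr.close()     # commit the transaction
        finally:
            tr.release()   # rolls back if close() was never reached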
1088 1090 def _journalfiles(self):
1089 1091 return ((self.svfs, 'journal'),
1090 1092 (self.vfs, 'journal.dirstate'),
1091 1093 (self.vfs, 'journal.branch'),
1092 1094 (self.vfs, 'journal.desc'),
1093 1095 (self.vfs, 'journal.bookmarks'),
1094 1096 (self.svfs, 'journal.phaseroots'))
1095 1097
1096 1098 def undofiles(self):
1097 1099 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1098 1100
1099 1101 def _writejournal(self, desc):
1100 1102 self.dirstate.savebackup(None, prefix='journal.')
1101 1103 self.vfs.write("journal.branch",
1102 1104 encoding.fromlocal(self.dirstate.branch()))
1103 1105 self.vfs.write("journal.desc",
1104 1106 "%d\n%s\n" % (len(self), desc))
1105 1107 self.vfs.write("journal.bookmarks",
1106 1108 self.vfs.tryread("bookmarks"))
1107 1109 self.svfs.write("journal.phaseroots",
1108 1110 self.svfs.tryread("phaseroots"))
1109 1111
1110 1112 def recover(self):
1111 1113 with self.lock():
1112 1114 if self.svfs.exists("journal"):
1113 1115 self.ui.status(_("rolling back interrupted transaction\n"))
1114 1116 vfsmap = {'': self.svfs,
1115 1117 'plain': self.vfs,}
1116 1118 transaction.rollback(self.svfs, vfsmap, "journal",
1117 1119 self.ui.warn)
1118 1120 self.invalidate()
1119 1121 return True
1120 1122 else:
1121 1123 self.ui.warn(_("no interrupted transaction available\n"))
1122 1124 return False
1123 1125
1124 1126 def rollback(self, dryrun=False, force=False):
1125 1127 wlock = lock = dsguard = None
1126 1128 try:
1127 1129 wlock = self.wlock()
1128 1130 lock = self.lock()
1129 1131 if self.svfs.exists("undo"):
1130 1132 dsguard = cmdutil.dirstateguard(self, 'rollback')
1131 1133
1132 1134 return self._rollback(dryrun, force, dsguard)
1133 1135 else:
1134 1136 self.ui.warn(_("no rollback information available\n"))
1135 1137 return 1
1136 1138 finally:
1137 1139 release(dsguard, lock, wlock)
1138 1140
1139 1141 @unfilteredmethod # Until we get smarter cache management
1140 1142 def _rollback(self, dryrun, force, dsguard):
1141 1143 ui = self.ui
1142 1144 try:
1143 1145 args = self.vfs.read('undo.desc').splitlines()
1144 1146 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1145 1147 if len(args) >= 3:
1146 1148 detail = args[2]
1147 1149 oldtip = oldlen - 1
1148 1150
1149 1151 if detail and ui.verbose:
1150 1152 msg = (_('repository tip rolled back to revision %s'
1151 1153 ' (undo %s: %s)\n')
1152 1154 % (oldtip, desc, detail))
1153 1155 else:
1154 1156 msg = (_('repository tip rolled back to revision %s'
1155 1157 ' (undo %s)\n')
1156 1158 % (oldtip, desc))
1157 1159 except IOError:
1158 1160 msg = _('rolling back unknown transaction\n')
1159 1161 desc = None
1160 1162
1161 1163 if not force and self['.'] != self['tip'] and desc == 'commit':
1162 1164 raise error.Abort(
1163 1165 _('rollback of last commit while not checked out '
1164 1166 'may lose data'), hint=_('use -f to force'))
1165 1167
1166 1168 ui.status(msg)
1167 1169 if dryrun:
1168 1170 return 0
1169 1171
1170 1172 parents = self.dirstate.parents()
1171 1173 self.destroying()
1172 1174 vfsmap = {'plain': self.vfs, '': self.svfs}
1173 1175 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1174 1176 if self.vfs.exists('undo.bookmarks'):
1175 1177 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1176 1178 if self.svfs.exists('undo.phaseroots'):
1177 1179 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1178 1180 self.invalidate()
1179 1181
1180 1182 parentgone = (parents[0] not in self.changelog.nodemap or
1181 1183 parents[1] not in self.changelog.nodemap)
1182 1184 if parentgone:
1183 1185 # prevent dirstateguard from overwriting already restored one
1184 1186 dsguard.close()
1185 1187
1186 1188 self.dirstate.restorebackup(None, prefix='undo.')
1187 1189 try:
1188 1190 branch = self.vfs.read('undo.branch')
1189 1191 self.dirstate.setbranch(encoding.tolocal(branch))
1190 1192 except IOError:
1191 1193 ui.warn(_('named branch could not be reset: '
1192 1194 'current branch is still \'%s\'\n')
1193 1195 % self.dirstate.branch())
1194 1196
1195 1197 parents = tuple([p.rev() for p in self[None].parents()])
1196 1198 if len(parents) > 1:
1197 1199 ui.status(_('working directory now based on '
1198 1200 'revisions %d and %d\n') % parents)
1199 1201 else:
1200 1202 ui.status(_('working directory now based on '
1201 1203 'revision %d\n') % parents)
1202 1204 mergemod.mergestate.clean(self, self['.'].node())
1203 1205
1204 1206 # TODO: if we know which new heads may result from this rollback, pass
1205 1207 # them to destroy(), which will prevent the branchhead cache from being
1206 1208 # invalidated.
1207 1209 self.destroyed()
1208 1210 return 0
1209 1211
1210 1212 def invalidatecaches(self):
1211 1213
1212 1214 if '_tagscache' in vars(self):
1213 1215 # can't use delattr on proxy
1214 1216 del self.__dict__['_tagscache']
1215 1217
1216 1218 self.unfiltered()._branchcaches.clear()
1217 1219 self.invalidatevolatilesets()
1218 1220
1219 1221 def invalidatevolatilesets(self):
1220 1222 self.filteredrevcache.clear()
1221 1223 obsolete.clearobscaches(self)
1222 1224
1223 1225 def invalidatedirstate(self):
1224 1226 '''Invalidates the dirstate, causing the next call to dirstate
1225 1227 to check if it was modified since the last time it was read,
1226 1228 rereading it if it has.
1227 1229
1228 1230 This is different from dirstate.invalidate() in that it doesn't always
1229 1231 reread the dirstate. Use dirstate.invalidate() if you want to
1230 1232 explicitly read the dirstate again (i.e. restoring it to a previous
1231 1233 known good state).'''
1232 1234 if hasunfilteredcache(self, 'dirstate'):
1233 1235 for k in self.dirstate._filecache:
1234 1236 try:
1235 1237 delattr(self.dirstate, k)
1236 1238 except AttributeError:
1237 1239 pass
1238 1240 delattr(self.unfiltered(), 'dirstate')
1239 1241
1240 1242 def invalidate(self, clearfilecache=False):
1241 1243 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1242 1244 for k in self._filecache.keys():
1243 1245 # dirstate is invalidated separately in invalidatedirstate()
1244 1246 if k == 'dirstate':
1245 1247 continue
1246 1248
1247 1249 if clearfilecache:
1248 1250 del self._filecache[k]
1249 1251 try:
1250 1252 delattr(unfiltered, k)
1251 1253 except AttributeError:
1252 1254 pass
1253 1255 self.invalidatecaches()
1254 1256 self.store.invalidatecaches()
1255 1257
1256 1258 def invalidateall(self):
1257 1259 '''Fully invalidates both store and non-store parts, causing the
1258 1260 subsequent operation to reread any outside changes.'''
1259 1261 # extension should hook this to invalidate its caches
1260 1262 self.invalidate()
1261 1263 self.invalidatedirstate()
1262 1264
1263 1265 def _refreshfilecachestats(self, tr):
1264 1266 """Reload stats of cached files so that they are flagged as valid"""
1265 1267 for k, ce in self._filecache.items():
1266 1268 if k == 'dirstate' or k not in self.__dict__:
1267 1269 continue
1268 1270 ce.refresh()
1269 1271
1270 1272 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1271 1273 inheritchecker=None, parentenvvar=None):
1272 1274 parentlock = None
1273 1275 # the contents of parentenvvar are used by the underlying lock to
1274 1276 # determine whether it can be inherited
1275 1277 if parentenvvar is not None:
1276 1278 parentlock = os.environ.get(parentenvvar)
1277 1279 try:
1278 1280 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1279 1281 acquirefn=acquirefn, desc=desc,
1280 1282 inheritchecker=inheritchecker,
1281 1283 parentlock=parentlock)
1282 1284 except error.LockHeld as inst:
1283 1285 if not wait:
1284 1286 raise
1285 1287 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1286 1288 (desc, inst.locker))
1287 1289 # default to 600 seconds timeout
1288 1290 l = lockmod.lock(vfs, lockname,
1289 1291 int(self.ui.config("ui", "timeout", "600")),
1290 1292 releasefn=releasefn, acquirefn=acquirefn,
1291 1293 desc=desc)
1292 1294 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1293 1295 return l
1294 1296
1295 1297 def _afterlock(self, callback):
1296 1298 """add a callback to be run when the repository is fully unlocked
1297 1299
1298 1300 The callback will be executed when the outermost lock is released
1299 1301 (with wlock being higher level than 'lock')."""
1300 1302 for ref in (self._wlockref, self._lockref):
1301 1303 l = ref and ref()
1302 1304 if l and l.held:
1303 1305 l.postrelease.append(callback)
1304 1306 break
1305 1307 else: # no lock has been found.
1306 1308 callback()
1307 1309
1308 1310 def lock(self, wait=True):
1309 1311 '''Lock the repository store (.hg/store) and return a weak reference
1310 1312 to the lock. Use this before modifying the store (e.g. committing or
1311 1313 stripping). If you are opening a transaction, get a lock as well.
1312 1314
1313 1315 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1314 1316 'wlock' first to avoid a deadlock hazard.'''
1315 1317 l = self._lockref and self._lockref()
1316 1318 if l is not None and l.held:
1317 1319 l.lock()
1318 1320 return l
1319 1321
1320 1322 l = self._lock(self.svfs, "lock", wait, None,
1321 1323 self.invalidate, _('repository %s') % self.origroot)
1322 1324 self._lockref = weakref.ref(l)
1323 1325 return l
1324 1326
1325 1327 def _wlockchecktransaction(self):
1326 1328 if self.currenttransaction() is not None:
1327 1329 raise error.LockInheritanceContractViolation(
1328 1330 'wlock cannot be inherited in the middle of a transaction')
1329 1331
1330 1332 def wlock(self, wait=True):
1331 1333 '''Lock the non-store parts of the repository (everything under
1332 1334 .hg except .hg/store) and return a weak reference to the lock.
1333 1335
1334 1336 Use this before modifying files in .hg.
1335 1337
1336 1338 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1337 1339 'wlock' first to avoid a deadlock hazard.'''
1338 1340 l = self._wlockref and self._wlockref()
1339 1341 if l is not None and l.held:
1340 1342 l.lock()
1341 1343 return l
1342 1344
1343 1345 # We do not need to check for non-waiting lock acquisition. Such
1344 1346 # acquisitions would not cause a deadlock as they would just fail.
1345 1347 if wait and (self.ui.configbool('devel', 'all-warnings')
1346 1348 or self.ui.configbool('devel', 'check-locks')):
1347 1349 l = self._lockref and self._lockref()
1348 1350 if l is not None and l.held:
1349 1351 self.ui.develwarn('"wlock" acquired after "lock"')
1350 1352
1351 1353 def unlock():
1352 1354 if self.dirstate.pendingparentchange():
1353 1355 self.dirstate.invalidate()
1354 1356 else:
1355 1357 self.dirstate.write(None)
1356 1358
1357 1359 self._filecache['dirstate'].refresh()
1358 1360
1359 1361 l = self._lock(self.vfs, "wlock", wait, unlock,
1360 1362 self.invalidatedirstate, _('working directory of %s') %
1361 1363 self.origroot,
1362 1364 inheritchecker=self._wlockchecktransaction,
1363 1365 parentenvvar='HG_WLOCK_LOCKER')
1364 1366 self._wlockref = weakref.ref(l)
1365 1367 return l
1366 1368
1367 1369 def _currentlock(self, lockref):
1368 1370 """Returns the lock if it's held, or None if it's not."""
1369 1371 if lockref is None:
1370 1372 return None
1371 1373 l = lockref()
1372 1374 if l is None or not l.held:
1373 1375 return None
1374 1376 return l
1375 1377
1376 1378 def currentwlock(self):
1377 1379 """Returns the wlock if it's held, or None if it's not."""
1378 1380 return self._currentlock(self._wlockref)
1379 1381
1380 1382 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1381 1383 """
1382 1384 commit an individual file as part of a larger transaction
1383 1385 """
1384 1386
1385 1387 fname = fctx.path()
1386 1388 fparent1 = manifest1.get(fname, nullid)
1387 1389 fparent2 = manifest2.get(fname, nullid)
1388 1390 if isinstance(fctx, context.filectx):
1389 1391 node = fctx.filenode()
1390 1392 if node in [fparent1, fparent2]:
1391 1393 self.ui.debug('reusing %s filelog entry\n' % fname)
1392 1394 if manifest1.flags(fname) != fctx.flags():
1393 1395 changelist.append(fname)
1394 1396 return node
1395 1397
1396 1398 flog = self.file(fname)
1397 1399 meta = {}
1398 1400 copy = fctx.renamed()
1399 1401 if copy and copy[0] != fname:
1400 1402 # Mark the new revision of this file as a copy of another
1401 1403 # file. This copy data will effectively act as a parent
1402 1404 # of this new revision. If this is a merge, the first
1403 1405 # parent will be the nullid (meaning "look up the copy data")
1404 1406 # and the second one will be the other parent. For example:
1405 1407 #
1406 1408 # 0 --- 1 --- 3 rev1 changes file foo
1407 1409 # \ / rev2 renames foo to bar and changes it
1408 1410 # \- 2 -/ rev3 should have bar with all changes and
1409 1411 # should record that bar descends from
1410 1412 # bar in rev2 and foo in rev1
1411 1413 #
1412 1414 # this allows this merge to succeed:
1413 1415 #
1414 1416 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1415 1417 # \ / merging rev3 and rev4 should use bar@rev2
1416 1418 # \- 2 --- 4 as the merge base
1417 1419 #
1418 1420
1419 1421 cfname = copy[0]
1420 1422 crev = manifest1.get(cfname)
1421 1423 newfparent = fparent2
1422 1424
1423 1425 if manifest2: # branch merge
1424 1426 if fparent2 == nullid or crev is None: # copied on remote side
1425 1427 if cfname in manifest2:
1426 1428 crev = manifest2[cfname]
1427 1429 newfparent = fparent1
1428 1430
1429 1431 # Here, we used to search backwards through history to try to find
1430 1432 # where the file copy came from if the source of a copy was not in
1431 1433 # the parent directory. However, this doesn't actually make sense to
1432 1434 # do (what does a copy from something not in your working copy even
1433 1435 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1434 1436 # the user that copy information was dropped, so if they didn't
1435 1437 # expect this outcome it can be fixed, but this is the correct
1436 1438 # behavior in this circumstance.
1437 1439
1438 1440 if crev:
1439 1441 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1440 1442 meta["copy"] = cfname
1441 1443 meta["copyrev"] = hex(crev)
1442 1444 fparent1, fparent2 = nullid, newfparent
1443 1445 else:
1444 1446 self.ui.warn(_("warning: can't find ancestor for '%s' "
1445 1447 "copied from '%s'!\n") % (fname, cfname))
1446 1448
1447 1449 elif fparent1 == nullid:
1448 1450 fparent1, fparent2 = fparent2, nullid
1449 1451 elif fparent2 != nullid:
1450 1452 # is one parent an ancestor of the other?
1451 1453 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1452 1454 if fparent1 in fparentancestors:
1453 1455 fparent1, fparent2 = fparent2, nullid
1454 1456 elif fparent2 in fparentancestors:
1455 1457 fparent2 = nullid
1456 1458
1457 1459 # is the file changed?
1458 1460 text = fctx.data()
1459 1461 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1460 1462 changelist.append(fname)
1461 1463 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1462 1464 # are just the flags changed during merge?
1463 1465 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1464 1466 changelist.append(fname)
1465 1467
1466 1468 return fparent1
1467 1469
1468 1470 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1469 1471 """check for commit arguments that aren't commitable"""
1470 1472 if match.isexact() or match.prefix():
1471 1473 matched = set(status.modified + status.added + status.removed)
1472 1474
1473 1475 for f in match.files():
1474 1476 f = self.dirstate.normalize(f)
1475 1477 if f == '.' or f in matched or f in wctx.substate:
1476 1478 continue
1477 1479 if f in status.deleted:
1478 1480 fail(f, _('file not found!'))
1479 1481 if f in vdirs: # visited directory
1480 1482 d = f + '/'
1481 1483 for mf in matched:
1482 1484 if mf.startswith(d):
1483 1485 break
1484 1486 else:
1485 1487 fail(f, _("no match under directory!"))
1486 1488 elif f not in self.dirstate:
1487 1489 fail(f, _("file not tracked!"))
1488 1490
1489 1491 @unfilteredmethod
1490 1492 def commit(self, text="", user=None, date=None, match=None, force=False,
1491 1493 editor=False, extra=None):
1492 1494 """Add a new revision to current repository.
1493 1495
1494 1496 Revision information is gathered from the working directory,
1495 1497 match can be used to filter the committed files. If editor is
1496 1498 supplied, it is called to get a commit message.
1497 1499 """
1498 1500 if extra is None:
1499 1501 extra = {}
1500 1502
1501 1503 def fail(f, msg):
1502 1504 raise error.Abort('%s: %s' % (f, msg))
1503 1505
1504 1506 if not match:
1505 1507 match = matchmod.always(self.root, '')
1506 1508
1507 1509 if not force:
1508 1510 vdirs = []
1509 1511 match.explicitdir = vdirs.append
1510 1512 match.bad = fail
1511 1513
1512 1514 wlock = lock = tr = None
1513 1515 try:
1514 1516 wlock = self.wlock()
1515 1517 lock = self.lock() # for recent changelog (see issue4368)
1516 1518
1517 1519 wctx = self[None]
1518 1520 merge = len(wctx.parents()) > 1
1519 1521
1520 1522 if not force and merge and match.ispartial():
1521 1523 raise error.Abort(_('cannot partially commit a merge '
1522 1524 '(do not specify files or patterns)'))
1523 1525
1524 1526 status = self.status(match=match, clean=force)
1525 1527 if force:
1526 1528 status.modified.extend(status.clean) # mq may commit clean files
1527 1529
1528 1530 # check subrepos
1529 1531 subs = []
1530 1532 commitsubs = set()
1531 1533 newstate = wctx.substate.copy()
1532 1534 # only manage subrepos and .hgsubstate if .hgsub is present
1533 1535 if '.hgsub' in wctx:
1534 1536 # we'll decide whether to track this ourselves, thanks
1535 1537 for c in status.modified, status.added, status.removed:
1536 1538 if '.hgsubstate' in c:
1537 1539 c.remove('.hgsubstate')
1538 1540
1539 1541 # compare current state to last committed state
1540 1542 # build new substate based on last committed state
1541 1543 oldstate = wctx.p1().substate
1542 1544 for s in sorted(newstate.keys()):
1543 1545 if not match(s):
1544 1546 # ignore working copy, use old state if present
1545 1547 if s in oldstate:
1546 1548 newstate[s] = oldstate[s]
1547 1549 continue
1548 1550 if not force:
1549 1551 raise error.Abort(
1550 1552 _("commit with new subrepo %s excluded") % s)
1551 1553 dirtyreason = wctx.sub(s).dirtyreason(True)
1552 1554 if dirtyreason:
1553 1555 if not self.ui.configbool('ui', 'commitsubrepos'):
1554 1556 raise error.Abort(dirtyreason,
1555 1557 hint=_("use --subrepos for recursive commit"))
1556 1558 subs.append(s)
1557 1559 commitsubs.add(s)
1558 1560 else:
1559 1561 bs = wctx.sub(s).basestate()
1560 1562 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1561 1563 if oldstate.get(s, (None, None, None))[1] != bs:
1562 1564 subs.append(s)
1563 1565
1564 1566 # check for removed subrepos
1565 1567 for p in wctx.parents():
1566 1568 r = [s for s in p.substate if s not in newstate]
1567 1569 subs += [s for s in r if match(s)]
1568 1570 if subs:
1569 1571 if (not match('.hgsub') and
1570 1572 '.hgsub' in (wctx.modified() + wctx.added())):
1571 1573 raise error.Abort(
1572 1574 _("can't commit subrepos without .hgsub"))
1573 1575 status.modified.insert(0, '.hgsubstate')
1574 1576
1575 1577 elif '.hgsub' in status.removed:
1576 1578 # clean up .hgsubstate when .hgsub is removed
1577 1579 if ('.hgsubstate' in wctx and
1578 1580 '.hgsubstate' not in (status.modified + status.added +
1579 1581 status.removed)):
1580 1582 status.removed.insert(0, '.hgsubstate')
1581 1583
1582 1584 # make sure all explicit patterns are matched
1583 1585 if not force:
1584 1586 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1585 1587
1586 1588 cctx = context.workingcommitctx(self, status,
1587 1589 text, user, date, extra)
1588 1590
1589 1591 # internal config: ui.allowemptycommit
1590 1592 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1591 1593 or extra.get('close') or merge or cctx.files()
1592 1594 or self.ui.configbool('ui', 'allowemptycommit'))
1593 1595 if not allowemptycommit:
1594 1596 return None
1595 1597
1596 1598 if merge and cctx.deleted():
1597 1599 raise error.Abort(_("cannot commit merge with missing files"))
1598 1600
1599 1601 ms = mergemod.mergestate.read(self)
1600 1602
1601 1603 if list(ms.unresolved()):
1602 1604 raise error.Abort(_('unresolved merge conflicts '
1603 1605 '(see "hg help resolve")'))
1604 1606 if ms.mdstate() != 's' or list(ms.driverresolved()):
1605 1607 raise error.Abort(_('driver-resolved merge conflicts'),
1606 1608 hint=_('run "hg resolve --all" to resolve'))
1607 1609
1608 1610 if editor:
1609 1611 cctx._text = editor(self, cctx, subs)
1610 1612 edited = (text != cctx._text)
1611 1613
1612 1614 # Save commit message in case this transaction gets rolled back
1613 1615 # (e.g. by a pretxncommit hook). Leave the content alone on
1614 1616 # the assumption that the user will use the same editor again.
1615 1617 msgfn = self.savecommitmessage(cctx._text)
1616 1618
1617 1619 # commit subs and write new state
1618 1620 if subs:
1619 1621 for s in sorted(commitsubs):
1620 1622 sub = wctx.sub(s)
1621 1623 self.ui.status(_('committing subrepository %s\n') %
1622 1624 subrepo.subrelpath(sub))
1623 1625 sr = sub.commit(cctx._text, user, date)
1624 1626 newstate[s] = (newstate[s][0], sr)
1625 1627 subrepo.writestate(self, newstate)
1626 1628
1627 1629 p1, p2 = self.dirstate.parents()
1628 1630 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1629 1631 try:
1630 1632 self.hook("precommit", throw=True, parent1=hookp1,
1631 1633 parent2=hookp2)
1632 1634 tr = self.transaction('commit')
1633 1635 ret = self.commitctx(cctx, True)
1634 1636 except: # re-raises
1635 1637 if edited:
1636 1638 self.ui.write(
1637 1639 _('note: commit message saved in %s\n') % msgfn)
1638 1640 raise
1639 1641 # update bookmarks, dirstate and mergestate
1640 1642 bookmarks.update(self, [p1, p2], ret)
1641 1643 cctx.markcommitted(ret)
1642 1644 ms.reset()
1643 1645 tr.close()
1644 1646
1645 1647 finally:
1646 1648 lockmod.release(tr, lock, wlock)
1647 1649
1648 1650 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1649 1651 # hack for commands that use a temporary commit (e.g. histedit):
1650 1652 # the temporary commit may have been stripped before the hook runs
1651 1653 if self.changelog.hasnode(ret):
1652 1654 self.hook("commit", node=node, parent1=parent1,
1653 1655 parent2=parent2)
1654 1656 self._afterlock(commithook)
1655 1657 return ret
1656 1658
1657 1659 @unfilteredmethod
1658 1660 def commitctx(self, ctx, error=False):
1659 1661 """Add a new revision to current repository.
1660 1662 Revision information is passed via the context argument.
1661 1663 """
1662 1664
1663 1665 tr = None
1664 1666 p1, p2 = ctx.p1(), ctx.p2()
1665 1667 user = ctx.user()
1666 1668
1667 1669 lock = self.lock()
1668 1670 try:
1669 1671 tr = self.transaction("commit")
1670 1672 trp = weakref.proxy(tr)
1671 1673
1672 1674 if ctx.files():
1673 1675 m1 = p1.manifest()
1674 1676 m2 = p2.manifest()
1675 1677 m = m1.copy()
1676 1678
1677 1679 # check in files
1678 1680 added = []
1679 1681 changed = []
1680 1682 removed = list(ctx.removed())
1681 1683 linkrev = len(self)
1682 1684 self.ui.note(_("committing files:\n"))
1683 1685 for f in sorted(ctx.modified() + ctx.added()):
1684 1686 self.ui.note(f + "\n")
1685 1687 try:
1686 1688 fctx = ctx[f]
1687 1689 if fctx is None:
1688 1690 removed.append(f)
1689 1691 else:
1690 1692 added.append(f)
1691 1693 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1692 1694 trp, changed)
1693 1695 m.setflag(f, fctx.flags())
1694 1696 except OSError as inst:
1695 1697 self.ui.warn(_("trouble committing %s!\n") % f)
1696 1698 raise
1697 1699 except IOError as inst:
1698 1700 errcode = getattr(inst, 'errno', errno.ENOENT)
1699 1701 if error or errcode and errcode != errno.ENOENT:
1700 1702 self.ui.warn(_("trouble committing %s!\n") % f)
1701 1703 raise
1702 1704
1703 1705 # update manifest
1704 1706 self.ui.note(_("committing manifest\n"))
1705 1707 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1706 1708 drop = [f for f in removed if f in m]
1707 1709 for f in drop:
1708 1710 del m[f]
1709 1711 mn = self.manifest.add(m, trp, linkrev,
1710 1712 p1.manifestnode(), p2.manifestnode(),
1711 1713 added, drop)
1712 1714 files = changed + removed
1713 1715 else:
1714 1716 mn = p1.manifestnode()
1715 1717 files = []
1716 1718
1717 1719 # update changelog
1718 1720 self.ui.note(_("committing changelog\n"))
1719 1721 self.changelog.delayupdate(tr)
1720 1722 n = self.changelog.add(mn, files, ctx.description(),
1721 1723 trp, p1.node(), p2.node(),
1722 1724 user, ctx.date(), ctx.extra().copy())
1723 1725 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1724 1726 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1725 1727 parent2=xp2)
1726 1728 # set the new commit in its proper phase
1727 1729 targetphase = subrepo.newcommitphase(self.ui, ctx)
1728 1730 if targetphase:
1729 1731 # retracting the boundary does not alter parent changesets.
1730 1732 # if a parent has a higher phase, the resulting phase will
1731 1733 # be compliant anyway
1732 1734 #
1733 1735 # if the minimal phase was 0 we don't need to retract anything
1734 1736 phases.retractboundary(self, tr, targetphase, [n])
1735 1737 tr.close()
1736 1738 branchmap.updatecache(self.filtered('served'))
1737 1739 return n
1738 1740 finally:
1739 1741 if tr:
1740 1742 tr.release()
1741 1743 lock.release()
1742 1744
1743 1745 @unfilteredmethod
1744 1746 def destroying(self):
1745 1747 '''Inform the repository that nodes are about to be destroyed.
1746 1748 Intended for use by strip and rollback, so there's a common
1747 1749 place for anything that has to be done before destroying history.
1748 1750
1749 1751 This is mostly useful for saving state that is in memory and waiting
1750 1752 to be flushed when the current lock is released. Because a call to
1751 1753 destroyed is imminent, the repo will be invalidated causing those
1752 1754 changes to stay in memory (waiting for the next unlock), or vanish
1753 1755 completely.
1754 1756 '''
1755 1757 # When using the same lock to commit and strip, the phasecache is left
1756 1758 # dirty after committing. Then when we strip, the repo is invalidated,
1757 1759 # causing those changes to disappear.
1758 1760 if '_phasecache' in vars(self):
1759 1761 self._phasecache.write()
1760 1762
1761 1763 @unfilteredmethod
1762 1764 def destroyed(self):
1763 1765 '''Inform the repository that nodes have been destroyed.
1764 1766 Intended for use by strip and rollback, so there's a common
1765 1767 place for anything that has to be done after destroying history.
1766 1768 '''
1767 1769 # When one tries to:
1768 1770 # 1) destroy nodes thus calling this method (e.g. strip)
1769 1771 # 2) use phasecache somewhere (e.g. commit)
1770 1772 #
1771 1773 # then 2) will fail because the phasecache contains nodes that were
1772 1774 # removed. We can either remove phasecache from the filecache,
1773 1775 # causing it to reload next time it is accessed, or simply filter
1774 1776 # the removed nodes now and write the updated cache.
1775 1777 self._phasecache.filterunknown(self)
1776 1778 self._phasecache.write()
1777 1779
1778 1780 # update the 'served' branch cache to help read only server process
1779 1781 # Thanks to branchcache collaboration this is done from the nearest
1780 1782 # filtered subset and it is expected to be fast.
1781 1783 branchmap.updatecache(self.filtered('served'))
1782 1784
1783 1785 # Ensure the persistent tag cache is updated. Doing it now
1784 1786 # means that the tag cache only has to worry about destroyed
1785 1787 # heads immediately after a strip/rollback. That in turn
1786 1788 # guarantees that "cachetip == currenttip" (comparing both rev
1787 1789 # and node) always means no nodes have been added or destroyed.
1788 1790
1789 1791 # XXX this is suboptimal when qrefresh'ing: we strip the current
1790 1792 # head, refresh the tag cache, then immediately add a new head.
1791 1793 # But I think doing it this way is necessary for the "instant
1792 1794 # tag cache retrieval" case to work.
1793 1795 self.invalidate()
1794 1796
1795 1797 def walk(self, match, node=None):
1796 1798 '''
1797 1799 walk recursively through the directory tree or a given
1798 1800 changeset, finding all files matched by the match
1799 1801 function
1800 1802 '''
1801 1803 return self[node].walk(match)
1802 1804
1803 1805 def status(self, node1='.', node2=None, match=None,
1804 1806 ignored=False, clean=False, unknown=False,
1805 1807 listsubrepos=False):
1806 1808 '''a convenience method that calls node1.status(node2)'''
1807 1809 return self[node1].status(node2, match, ignored, clean, unknown,
1808 1810 listsubrepos)
1809 1811
1810 1812 def heads(self, start=None):
1811 1813 heads = self.changelog.heads(start)
1812 1814 # sort the output in rev descending order
1813 1815 return sorted(heads, key=self.changelog.rev, reverse=True)
1814 1816
1815 1817 def branchheads(self, branch=None, start=None, closed=False):
1816 1818 '''return a (possibly filtered) list of heads for the given branch
1817 1819
1818 1820 Heads are returned in topological order, from newest to oldest.
1819 1821 If branch is None, use the dirstate branch.
1820 1822 If start is not None, return only heads reachable from start.
1821 1823 If closed is True, return heads that are marked as closed as well.
1822 1824 '''
1823 1825 if branch is None:
1824 1826 branch = self[None].branch()
1825 1827 branches = self.branchmap()
1826 1828 if branch not in branches:
1827 1829 return []
1828 1830 # the cache returns heads ordered lowest to highest
1829 1831 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1830 1832 if start is not None:
1831 1833 # filter out the heads that cannot be reached from startrev
1832 1834 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1833 1835 bheads = [h for h in bheads if h in fbheads]
1834 1836 return bheads
1835 1837
1836 1838 def branches(self, nodes):
1837 1839 if not nodes:
1838 1840 nodes = [self.changelog.tip()]
1839 1841 b = []
1840 1842 for n in nodes:
1841 1843 t = n
1842 1844 while True:
1843 1845 p = self.changelog.parents(n)
1844 1846 if p[1] != nullid or p[0] == nullid:
1845 1847 b.append((t, n, p[0], p[1]))
1846 1848 break
1847 1849 n = p[0]
1848 1850 return b
1849 1851
1850 1852 def between(self, pairs):
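# for each (top, bottom) pair, walk first parents down from top,
# collecting nodes at exponentially growing distances (1, 2, 4, ...);
# the legacy wire-protocol discovery bisects over this sampling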
1851 1853 r = []
1852 1854
1853 1855 for top, bottom in pairs:
1854 1856 n, l, i = top, [], 0
1855 1857 f = 1
1856 1858
1857 1859 while n != bottom and n != nullid:
1858 1860 p = self.changelog.parents(n)[0]
1859 1861 if i == f:
1860 1862 l.append(n)
1861 1863 f = f * 2
1862 1864 n = p
1863 1865 i += 1
1864 1866
1865 1867 r.append(l)
1866 1868
1867 1869 return r
1868 1870
1869 1871 def checkpush(self, pushop):
1870 1872 """Extensions can override this function if additional checks have
1871 1873 to be performed before pushing, or call it if they override push
1872 1874 command.
1873 1875 """
1874 1876 pass
1875 1877
1876 1878 @unfilteredpropertycache
1877 1879 def prepushoutgoinghooks(self):
1878 1880 """Return util.hooks consists of a pushop with repo, remote, outgoing
1879 1881 methods, which are called before pushing changesets.
1880 1882 """
1881 1883 return util.hooks()
1882 1884
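# A minimal registration sketch (hypothetical extension name and check);
# each hook added to prepushoutgoinghooks receives the pushop and may
# raise error.Abort to refuse the push:
#
#     from mercurial import error
#
#     def _mypushcheck(pushop):
#         for node in pushop.outgoing.missing:  # changesets about to leave
#             if 'forbidden' in pushop.repo[node].description():
#                 raise error.Abort('refusing to push forbidden changeset')
#
#     def reposetup(ui, repo):
#         repo.prepushoutgoinghooks.add('myext', _mypushcheck)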
1883 1885 def pushkey(self, namespace, key, old, new):
1884 1886 try:
1885 1887 tr = self.currenttransaction()
1886 1888 hookargs = {}
1887 1889 if tr is not None:
1888 1890 hookargs.update(tr.hookargs)
1889 1891 hookargs['namespace'] = namespace
1890 1892 hookargs['key'] = key
1891 1893 hookargs['old'] = old
1892 1894 hookargs['new'] = new
1893 1895 self.hook('prepushkey', throw=True, **hookargs)
1894 1896 except error.HookAbort as exc:
1895 1897 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1896 1898 if exc.hint:
1897 1899 self.ui.write_err(_("(%s)\n") % exc.hint)
1898 1900 return False
1899 1901 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1900 1902 ret = pushkey.push(self, namespace, key, old, new)
1901 1903 def runhook():
1902 1904 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1903 1905 ret=ret)
1904 1906 self._afterlock(runhook)
1905 1907 return ret
1906 1908
1907 1909 def listkeys(self, namespace):
1908 1910 self.hook('prelistkeys', throw=True, namespace=namespace)
1909 1911 self.ui.debug('listing keys for "%s"\n' % namespace)
1910 1912 values = pushkey.list(self, namespace)
1911 1913 self.hook('listkeys', namespace=namespace, values=values)
1912 1914 return values
1913 1915
1914 1916 def debugwireargs(self, one, two, three=None, four=None, five=None):
1915 1917 '''used to test argument passing over the wire'''
1916 1918 return "%s %s %s %s %s" % (one, two, three, four, five)
1917 1919
1918 1920 def savecommitmessage(self, text):
1919 1921 fp = self.vfs('last-message.txt', 'wb')
1920 1922 try:
1921 1923 fp.write(text)
1922 1924 finally:
1923 1925 fp.close()
1924 1926 return self.pathto(fp.name[len(self.root) + 1:])
1925 1927
1926 1928 # used to avoid circular references so destructors work
1927 1929 def aftertrans(files):
1928 1930 renamefiles = [tuple(t) for t in files]
1929 1931 def a():
1930 1932 for vfs, src, dest in renamefiles:
1931 1933 try:
1932 1934 vfs.rename(src, dest)
1933 1935 except OSError: # journal file does not yet exist
1934 1936 pass
1935 1937 return a
1936 1938
1937 1939 def undoname(fn):
1938 1940 base, name = os.path.split(fn)
1939 1941 assert name.startswith('journal')
1940 1942 return os.path.join(base, name.replace('journal', 'undo', 1))
1941 1943
1942 1944 def instance(ui, path, create):
1943 1945 return localrepository(ui, util.urllocalpath(path), create)
1944 1946
1945 1947 def islocal(path):
1946 1948 return True
1947 1949
1948 1950 def newreporequirements(repo):
1949 1951 """Determine the set of requirements for a new local repository.
1950 1952
1951 1953 Extensions can wrap this function to specify custom requirements for
1952 1954 new repositories.
1953 1955 """
1954 1956 ui = repo.ui
1955 1957 requirements = set(['revlogv1'])
1956 1958 if ui.configbool('format', 'usestore', True):
1957 1959 requirements.add('store')
1958 1960 if ui.configbool('format', 'usefncache', True):
1959 1961 requirements.add('fncache')
1960 1962 if ui.configbool('format', 'dotencode', True):
1961 1963 requirements.add('dotencode')
1962 1964
1963 1965 if scmutil.gdinitconfig(ui):
1964 1966 requirements.add('generaldelta')
1965 1967 if ui.configbool('experimental', 'treemanifest', False):
1966 1968 requirements.add('treemanifest')
1967 1969 if ui.configbool('experimental', 'manifestv2', False):
1968 1970 requirements.add('manifestv2')
1969 1971
1970 1972 return requirements
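# A minimal wrapping sketch (hypothetical requirement name 'exp-myformat'
# and config knob), using the standard extensions.wrapfunction mechanism:
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         requirements = orig(repo)
#         if repo.ui.configbool('format', 'myformat', False):
#             requirements.add('exp-myformat')
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)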
@@ -1,1399 +1,1402 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
43 43 class status(tuple):
44 44 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
45 45 and 'ignored' properties are only relevant to the working copy.
46 46 '''
47 47
48 48 __slots__ = ()
49 49
50 50 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
51 51 clean):
52 52 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
53 53 ignored, clean))
54 54
55 55 @property
56 56 def modified(self):
57 57 '''files that have been modified'''
58 58 return self[0]
59 59
60 60 @property
61 61 def added(self):
62 62 '''files that have been added'''
63 63 return self[1]
64 64
65 65 @property
66 66 def removed(self):
67 67 '''files that have been removed'''
68 68 return self[2]
69 69
70 70 @property
71 71 def deleted(self):
72 72 '''files that are in the dirstate, but have been deleted from the
73 73 working copy (aka "missing")
74 74 '''
75 75 return self[3]
76 76
77 77 @property
78 78 def unknown(self):
79 79 '''files not in the dirstate that are not ignored'''
80 80 return self[4]
81 81
82 82 @property
83 83 def ignored(self):
84 84 '''files not in the dirstate that are ignored (by _dirignore())'''
85 85 return self[5]
86 86
87 87 @property
88 88 def clean(self):
89 89 '''files that have not been modified'''
90 90 return self[6]
91 91
92 92 def __repr__(self, *args, **kwargs):
93 93 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
94 94 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
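# A usage sketch (illustrative values): status behaves as a plain 7-tuple
# while also exposing each field by name:
#
#     st = status(['a.txt'], [], [], [], [], [], ['b.txt'])
#     assert st.modified == ['a.txt'] and st[0] == st.modified
#     assert st.clean == ['b.txt']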
96 96 def itersubrepos(ctx1, ctx2):
97 97 """find subrepos in ctx1 or ctx2"""
98 98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103 103
104 104 missing = set()
105 105
106 106 for subpath in ctx2.substate:
107 107 if subpath not in ctx1.substate:
108 108 del subpaths[subpath]
109 109 missing.add(subpath)
110 110
111 111 for subpath, ctx in sorted(subpaths.iteritems()):
112 112 yield subpath, ctx.sub(subpath)
113 113
114 114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 115 # status and diff will have an accurate result when it does
116 116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 117 # against itself.
118 118 for subpath in missing:
119 119 yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
121 121 def nochangesfound(ui, repo, excluded=None):
122 122 '''Report no changes for push/pull, excluded is None or a list of
123 123 nodes excluded from the push/pull.
124 124 '''
125 125 secretlist = []
126 126 if excluded:
127 127 for n in excluded:
128 128 if n not in repo:
129 129 # discovery should not have included the filtered revision,
130 130 # we have to explicitly exclude it until discovery is cleaned up.
131 131 continue
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def checknewlabel(repo, lbl, kind):
143 143 # Do not use the "kind" parameter in ui output.
144 144 # It makes strings difficult to translate.
145 145 if lbl in ['tip', '.', 'null']:
146 146 raise error.Abort(_("the name '%s' is reserved") % lbl)
147 147 for c in (':', '\0', '\n', '\r'):
148 148 if c in lbl:
149 149 raise error.Abort(_("%r cannot be used in a name") % c)
150 150 try:
151 151 int(lbl)
152 152 raise error.Abort(_("cannot use an integer as a name"))
153 153 except ValueError:
154 154 pass
155 155
156 156 def checkfilename(f):
157 157 '''Check that the filename f is an acceptable filename for a tracked file'''
158 158 if '\r' in f or '\n' in f:
159 159 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
161 161 def checkportable(ui, f):
162 162 '''Check if filename f is portable and warn or abort depending on config'''
163 163 checkfilename(f)
164 164 abort, warn = checkportabilityalert(ui)
165 165 if abort or warn:
166 166 msg = util.checkwinfilename(f)
167 167 if msg:
168 168 msg = "%s: %r" % (msg, f)
169 169 if abort:
170 170 raise error.Abort(msg)
171 171 ui.warn(_("warning: %s\n") % msg)
172 172
173 173 def checkportabilityalert(ui):
174 174 '''check if the user's config requests nothing, a warning, or abort for
175 175 non-portable filenames'''
176 176 val = ui.config('ui', 'portablefilenames', 'warn')
177 177 lval = val.lower()
178 178 bval = util.parsebool(val)
179 179 abort = os.name == 'nt' or lval == 'abort'
180 180 warn = bval or lval == 'warn'
181 181 if bval is None and not (warn or abort or lval == 'ignore'):
182 182 raise error.ConfigError(
183 183 _("ui.portablefilenames value is invalid ('%s')") % val)
184 184 return abort, warn
185 185
186 186 class casecollisionauditor(object):
187 187 def __init__(self, ui, abort, dirstate):
188 188 self._ui = ui
189 189 self._abort = abort
190 190 allfiles = '\0'.join(dirstate._map)
191 191 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
192 192 self._dirstate = dirstate
193 193 # The purpose of _newfiles is so that we don't complain about
194 194 # case collisions if someone were to call this object with the
195 195 # same filename twice.
196 196 self._newfiles = set()
197 197
198 198 def __call__(self, f):
199 199 if f in self._newfiles:
200 200 return
201 201 fl = encoding.lower(f)
202 202 if fl in self._loweredfiles and f not in self._dirstate:
203 203 msg = _('possible case-folding collision for %s') % f
204 204 if self._abort:
205 205 raise error.Abort(msg)
206 206 self._ui.warn(_("warning: %s\n") % msg)
207 207 self._loweredfiles.add(fl)
208 208 self._newfiles.add(f)
209 209
210 210 def filteredhash(repo, maxrev):
211 211 """build hash of filtered revisions in the current repoview.
212 212
213 213 Multiple caches perform up-to-date validation by checking that the
214 214 tiprev and tipnode stored in the cache file match the current repository.
215 215 However, this is not sufficient for validating repoviews because the set
216 216 of revisions in the view may change without the repository tiprev and
217 217 tipnode changing.
218 218
219 219 This function hashes all the revs filtered from the view and returns
220 220 that SHA-1 digest.
221 221 """
222 222 cl = repo.changelog
223 223 if not cl.filteredrevs:
224 224 return None
225 225 key = None
226 226 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
227 227 if revs:
228 228 s = hashlib.sha1()
229 229 for rev in revs:
230 230 s.update('%s;' % rev)
231 231 key = s.digest()
232 232 return key
233 233
234 234 class abstractvfs(object):
235 235 """Abstract base class; cannot be instantiated"""
236 236
237 237 def __init__(self, *args, **kwargs):
238 238 '''Prevent instantiation; don't call this from subclasses.'''
239 239 raise NotImplementedError('attempted instantiating ' + str(type(self)))
240 240
241 241 def tryread(self, path):
242 242 '''gracefully return an empty string for missing files'''
243 243 try:
244 244 return self.read(path)
245 245 except IOError as inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248 return ""
249 249
250 250 def tryreadlines(self, path, mode='rb'):
251 251 '''gracefully return an empty array for missing files'''
252 252 try:
253 253 return self.readlines(path, mode=mode)
254 254 except IOError as inst:
255 255 if inst.errno != errno.ENOENT:
256 256 raise
257 257 return []
258 258
259 259 def open(self, path, mode="r", text=False, atomictemp=False,
260 260 notindexed=False, backgroundclose=False):
261 261 '''Open ``path`` file, which is relative to vfs root.
262 262
263 263 Newly created directories are marked as "not to be indexed by
264 264 the content indexing service", if ``notindexed`` is specified
265 265 for "write" mode access.
266 266 '''
267 267 self.open = self.__call__
268 268 return self.__call__(path, mode, text, atomictemp, notindexed,
269 269 backgroundclose=backgroundclose)
270 270
271 271 def read(self, path):
272 272 with self(path, 'rb') as fp:
273 273 return fp.read()
274 274
275 275 def readlines(self, path, mode='rb'):
276 276 with self(path, mode=mode) as fp:
277 277 return fp.readlines()
278 278
279 279 def write(self, path, data, backgroundclose=False):
280 280 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
281 281 return fp.write(data)
282 282
283 283 def writelines(self, path, data, mode='wb', notindexed=False):
284 284 with self(path, mode=mode, notindexed=notindexed) as fp:
285 285 return fp.writelines(data)
286 286
287 287 def append(self, path, data):
288 288 with self(path, 'ab') as fp:
289 289 return fp.write(data)
290 290
291 291 def basename(self, path):
292 292 """return base element of a path (as os.path.basename would do)
293 293
294 294 This exists to allow handling of strange encoding if needed."""
295 295 return os.path.basename(path)
296 296
297 297 def chmod(self, path, mode):
298 298 return os.chmod(self.join(path), mode)
299 299
300 300 def dirname(self, path):
301 301 """return dirname element of a path (as os.path.dirname would do)
302 302
303 303 This exists to allow handling of strange encoding if needed."""
304 304 return os.path.dirname(path)
305 305
306 306 def exists(self, path=None):
307 307 return os.path.exists(self.join(path))
308 308
309 309 def fstat(self, fp):
310 310 return util.fstat(fp)
311 311
312 312 def isdir(self, path=None):
313 313 return os.path.isdir(self.join(path))
314 314
315 315 def isfile(self, path=None):
316 316 return os.path.isfile(self.join(path))
317 317
318 318 def islink(self, path=None):
319 319 return os.path.islink(self.join(path))
320 320
321 321 def isfileorlink(self, path=None):
322 322 '''return whether path is a regular file or a symlink
323 323
324 324 Unlike isfile, this doesn't follow symlinks.'''
325 325 try:
326 326 st = self.lstat(path)
327 327 except OSError:
328 328 return False
329 329 mode = st.st_mode
330 330 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
331 331
332 332 def reljoin(self, *paths):
333 333 """join various elements of a path together (as os.path.join would do)
334 334
335 335 The vfs base is not injected so that the path stays relative. This exists
336 336 to allow handling of strange encoding if needed."""
337 337 return os.path.join(*paths)
338 338
339 339 def split(self, path):
340 340 """split top-most element of a path (as os.path.split would do)
341 341
342 342 This exists to allow handling of strange encoding if needed."""
343 343 return os.path.split(path)
344 344
345 345 def lexists(self, path=None):
346 346 return os.path.lexists(self.join(path))
347 347
348 348 def lstat(self, path=None):
349 349 return os.lstat(self.join(path))
350 350
351 351 def listdir(self, path=None):
352 352 return os.listdir(self.join(path))
353 353
354 354 def makedir(self, path=None, notindexed=True):
355 355 return util.makedir(self.join(path), notindexed)
356 356
357 357 def makedirs(self, path=None, mode=None):
358 358 return util.makedirs(self.join(path), mode)
359 359
360 360 def makelock(self, info, path):
361 361 return util.makelock(info, self.join(path))
362 362
363 363 def mkdir(self, path=None):
364 364 return os.mkdir(self.join(path))
365 365
366 366 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
367 367 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
368 368 dir=self.join(dir), text=text)
369 369 dname, fname = util.split(name)
370 370 if dir:
371 371 return fd, os.path.join(dir, fname)
372 372 else:
373 373 return fd, fname
374 374
375 375 def readdir(self, path=None, stat=None, skip=None):
376 376 return osutil.listdir(self.join(path), stat, skip)
377 377
378 378 def readlock(self, path):
379 379 return util.readlock(self.join(path))
380 380
381 381 def rename(self, src, dst, checkambig=False):
382 382 """Rename from src to dst
383 383
384 384 checkambig argument is used with util.filestat, and is useful
385 385 only if the destination file is guarded by a lock
386 386 (e.g. repo.lock or repo.wlock).
387 387 """
388 388 dstpath = self.join(dst)
389 389 oldstat = checkambig and util.filestat(dstpath)
390 390 if oldstat and oldstat.stat:
391 391 ret = util.rename(self.join(src), dstpath)
392 392 newstat = util.filestat(dstpath)
393 393 if newstat.isambig(oldstat):
394 394 # stat of renamed file is ambiguous to original one
395 395 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
396 396 os.utime(dstpath, (advanced, advanced))
397 397 return ret
398 398 return util.rename(self.join(src), dstpath)
399 399
400 400 def readlink(self, path):
401 401 return os.readlink(self.join(path))
402 402
403 403 def removedirs(self, path=None):
404 404 """Remove a leaf directory and all empty intermediate ones
405 405 """
406 406 return util.removedirs(self.join(path))
407 407
408 408 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
409 409 """Remove a directory tree recursively
410 410
411 411 If ``forcibly``, this tries to remove READ-ONLY files, too.
412 412 """
413 413 if forcibly:
414 414 def onerror(function, path, excinfo):
415 415 if function is not os.remove:
416 416 raise
417 417 # read-only files cannot be unlinked under Windows
418 418 s = os.stat(path)
419 419 if (s.st_mode & stat.S_IWRITE) != 0:
420 420 raise
421 421 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
422 422 os.remove(path)
423 423 else:
424 424 onerror = None
425 425 return shutil.rmtree(self.join(path),
426 426 ignore_errors=ignore_errors, onerror=onerror)
427 427
428 428 def setflags(self, path, l, x):
429 429 return util.setflags(self.join(path), l, x)
430 430
431 431 def stat(self, path=None):
432 432 return os.stat(self.join(path))
433 433
434 434 def unlink(self, path=None):
435 435 return util.unlink(self.join(path))
436 436
437 437 def unlinkpath(self, path=None, ignoremissing=False):
438 438 return util.unlinkpath(self.join(path), ignoremissing)
439 439
440 440 def utime(self, path=None, t=None):
441 441 return os.utime(self.join(path), t)
442 442
443 443 def walk(self, path=None, onerror=None):
444 444 """Yield (dirpath, dirs, files) tuple for each directories under path
445 445
446 446 ``dirpath`` is relative one from the root of this vfs. This
447 447 uses ``os.sep`` as path separator, even you specify POSIX
448 448 style ``path``.
449 449
450 450 "The root of this vfs" is represented as empty ``dirpath``.
451 451 """
452 452 root = os.path.normpath(self.join(None))
453 453 # when dirpath == root, dirpath[prefixlen:] becomes empty
454 454 # because len(dirpath) < prefixlen.
455 455 prefixlen = len(pathutil.normasprefix(root))
456 456 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
457 457 yield (dirpath[prefixlen:], dirs, files)
458 458
459 459 @contextlib.contextmanager
460 460 def backgroundclosing(self, ui, expectedcount=-1):
461 461 """Allow files to be closed asynchronously.
462 462
463 463 When this context manager is active, ``backgroundclose`` can be passed
464 464 to ``__call__``/``open`` to result in the file possibly being closed
465 465 asynchronously, on a background thread.
466 466 """
467 467 # This is an arbitrary restriction and could be changed if we ever
468 468 # have a use case.
469 469 vfs = getattr(self, 'vfs', self)
470 470 if getattr(vfs, '_backgroundfilecloser', None):
471 471 raise error.Abort('can only have 1 active background file closer')
472 472
473 473 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
474 474 try:
475 475 vfs._backgroundfilecloser = bfc
476 476 yield bfc
477 477 finally:
478 478 vfs._backgroundfilecloser = None
479 479
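# A usage sketch (assuming ``names`` and ``data`` exist): files opened with
# ``backgroundclose=True`` inside the context may be handed to a background
# thread for closing instead of blocking the writer:
#
#     with vfs.backgroundclosing(ui, expectedcount=len(names)):
#         for name in names:
#             with vfs(name, 'wb', backgroundclose=True) as fp:
#                 fp.write(data[name])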
480 480 class vfs(abstractvfs):
481 481 '''Operate files relative to a base directory
482 482
483 483 This class is used to hide the details of COW semantics and
484 484 remote file access from higher level code.
485 485 '''
486 486 def __init__(self, base, audit=True, expandpath=False, realpath=False):
487 487 if expandpath:
488 488 base = util.expandpath(base)
489 489 if realpath:
490 490 base = os.path.realpath(base)
491 491 self.base = base
492 492 self.mustaudit = audit
493 493 self.createmode = None
494 494 self._trustnlink = None
495 495
496 496 @property
497 497 def mustaudit(self):
498 498 return self._audit
499 499
500 500 @mustaudit.setter
501 501 def mustaudit(self, onoff):
502 502 self._audit = onoff
503 503 if onoff:
504 504 self.audit = pathutil.pathauditor(self.base)
505 505 else:
506 506 self.audit = util.always
507 507
508 508 @util.propertycache
509 509 def _cansymlink(self):
510 510 return util.checklink(self.base)
511 511
512 512 @util.propertycache
513 513 def _chmod(self):
514 514 return util.checkexec(self.base)
515 515
516 516 def _fixfilemode(self, name):
517 517 if self.createmode is None or not self._chmod:
518 518 return
519 519 os.chmod(name, self.createmode & 0o666)
520 520
521 521 def __call__(self, path, mode="r", text=False, atomictemp=False,
522 522 notindexed=False, backgroundclose=False, checkambig=False):
523 523 '''Open ``path`` file, which is relative to vfs root.
524 524
525 525 Newly created directories are marked as "not to be indexed by
526 526 the content indexing service", if ``notindexed`` is specified
527 527 for "write" mode access.
528 528
529 529 If ``backgroundclose`` is passed, the file may be closed asynchronously.
530 530 It can only be used if the ``self.backgroundclosing()`` context manager
531 531 is active. This should only be specified if the following criteria hold:
532 532
533 533 1. There is a potential for writing thousands of files. Unless you
534 534 are writing thousands of files, the performance benefits of
535 535 asynchronously closing files are not realized.
536 536 2. Files are opened exactly once for the ``backgroundclosing``
537 537 active duration and are therefore free of race conditions between
538 538 closing a file on a background thread and reopening it. (If the
539 539 file were opened multiple times, there could be unflushed data
540 540 because the original file handle hasn't been flushed/closed yet.)
541 541
542 542 ``checkambig`` argument is passed to atomictempfile (valid
543 543 only for writing), and is useful only if the target file is
544 544 guarded by a lock (e.g. repo.lock or repo.wlock).
545 545 '''
546 546 if self._audit:
547 547 r = util.checkosfilename(path)
548 548 if r:
549 549 raise error.Abort("%s: %r" % (r, path))
550 550 self.audit(path)
551 551 f = self.join(path)
552 552
553 553 if not text and "b" not in mode:
554 554 mode += "b" # for that other OS
555 555
556 556 nlink = -1
557 557 if mode not in ('r', 'rb'):
558 558 dirname, basename = util.split(f)
559 559 # If basename is empty, then the path is malformed because it points
560 560 # to a directory. Let the posixfile() call below raise IOError.
561 561 if basename:
562 562 if atomictemp:
563 563 util.makedirs(dirname, self.createmode, notindexed)
564 564 return util.atomictempfile(f, mode, self.createmode,
565 565 checkambig=checkambig)
566 566 try:
567 567 if 'w' in mode:
568 568 util.unlink(f)
569 569 nlink = 0
570 570 else:
571 571 # nlinks() may behave differently for files on Windows
572 572 # shares if the file is open.
573 573 with util.posixfile(f):
574 574 nlink = util.nlinks(f)
575 575 if nlink < 1:
576 576 nlink = 2 # force mktempcopy (issue1922)
577 577 except (OSError, IOError) as e:
578 578 if e.errno != errno.ENOENT:
579 579 raise
580 580 nlink = 0
581 581 util.makedirs(dirname, self.createmode, notindexed)
582 582 if nlink > 0:
583 583 if self._trustnlink is None:
584 584 self._trustnlink = nlink > 1 or util.checknlink(f)
585 585 if nlink > 1 or not self._trustnlink:
586 586 util.rename(util.mktempcopy(f), f)
587 587 fp = util.posixfile(f, mode)
588 588 if nlink == 0:
589 589 self._fixfilemode(f)
590 590
591 591 if backgroundclose:
592 592 if not self._backgroundfilecloser:
593 593 raise error.Abort('backgroundclose can only be used when a '
594 594 'backgroundclosing context manager is active')
595 595
596 596 fp = delayclosedfile(fp, self._backgroundfilecloser)
597 597
598 598 return fp
599 599
600 600 def symlink(self, src, dst):
601 601 self.audit(dst)
602 602 linkname = self.join(dst)
603 603 try:
604 604 os.unlink(linkname)
605 605 except OSError:
606 606 pass
607 607
608 608 util.makedirs(os.path.dirname(linkname), self.createmode)
609 609
610 610 if self._cansymlink:
611 611 try:
612 612 os.symlink(src, linkname)
613 613 except OSError as err:
614 614 raise OSError(err.errno, _('could not symlink to %r: %s') %
615 615 (src, err.strerror), linkname)
616 616 else:
617 617 self.write(dst, src)
618 618
619 619 def join(self, path, *insidef):
620 620 if path:
621 621 return os.path.join(self.base, path, *insidef)
622 622 else:
623 623 return self.base
624 624
625 625 opener = vfs
626 626
627 627 class auditvfs(object):
628 628 def __init__(self, vfs):
629 629 self.vfs = vfs
630 630
631 631 @property
632 632 def mustaudit(self):
633 633 return self.vfs.mustaudit
634 634
635 635 @mustaudit.setter
636 636 def mustaudit(self, onoff):
637 637 self.vfs.mustaudit = onoff
638 638
639 639 class filtervfs(abstractvfs, auditvfs):
640 640 '''Wrapper vfs for filtering filenames with a function.'''
641 641
642 642 def __init__(self, vfs, filter):
643 643 auditvfs.__init__(self, vfs)
644 644 self._filter = filter
645 645
646 646 def __call__(self, path, *args, **kwargs):
647 647 return self.vfs(self._filter(path), *args, **kwargs)
648 648
649 649 def join(self, path, *insidef):
650 650 if path:
651 651 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
652 652 else:
653 653 return self.vfs.join(path)
654 654
655 655 filteropener = filtervfs
656 656
657 657 class readonlyvfs(abstractvfs, auditvfs):
658 658 '''Wrapper vfs preventing any writing.'''
659 659
660 660 def __init__(self, vfs):
661 661 auditvfs.__init__(self, vfs)
662 662
663 663 def __call__(self, path, mode='r', *args, **kw):
664 664 if mode not in ('r', 'rb'):
665 665 raise error.Abort('this vfs is read only')
666 666 return self.vfs(path, mode, *args, **kw)
667 667
668 668 def join(self, path, *insidef):
669 669 return self.vfs.join(path, *insidef)
670 670
671 671 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
672 672 '''yield every hg repository under path, always recursively.
673 673 The recurse flag will only control recursion into repo working dirs'''
674 674 def errhandler(err):
675 675 if err.filename == path:
676 676 raise err
677 677 samestat = getattr(os.path, 'samestat', None)
678 678 if followsym and samestat is not None:
679 679 def adddir(dirlst, dirname):
680 680 match = False
681 681 dirstat = os.stat(dirname)
682 682 for lstdirstat in dirlst:
683 683 if samestat(dirstat, lstdirstat):
684 684 match = True
685 685 break
686 686 if not match:
687 687 dirlst.append(dirstat)
688 688 return not match
689 689 else:
690 690 followsym = False
691 691
692 692 if (seen_dirs is None) and followsym:
693 693 seen_dirs = []
694 694 adddir(seen_dirs, path)
695 695 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
696 696 dirs.sort()
697 697 if '.hg' in dirs:
698 698 yield root # found a repository
699 699 qroot = os.path.join(root, '.hg', 'patches')
700 700 if os.path.isdir(os.path.join(qroot, '.hg')):
701 701 yield qroot # we have a patch queue repo here
702 702 if recurse:
703 703 # avoid recursing inside the .hg directory
704 704 dirs.remove('.hg')
705 705 else:
706 706 dirs[:] = [] # don't descend further
707 707 elif followsym:
708 708 newdirs = []
709 709 for d in dirs:
710 710 fname = os.path.join(root, d)
711 711 if adddir(seen_dirs, fname):
712 712 if os.path.islink(fname):
713 713 for hgname in walkrepos(fname, True, seen_dirs):
714 714 yield hgname
715 715 else:
716 716 newdirs.append(d)
717 717 dirs[:] = newdirs
718 718
719 719 def osrcpath():
720 720 '''return default os-specific hgrc search path'''
721 721 path = []
722 722 defaultpath = os.path.join(util.datapath, 'default.d')
723 723 if os.path.isdir(defaultpath):
724 724 for f, kind in osutil.listdir(defaultpath):
725 725 if f.endswith('.rc'):
726 726 path.append(os.path.join(defaultpath, f))
727 727 path.extend(systemrcpath())
728 728 path.extend(userrcpath())
729 729 path = [os.path.normpath(f) for f in path]
730 730 return path
731 731
732 732 _rcpath = None
733 733
734 734 def rcpath():
735 735 '''return hgrc search path. if env var HGRCPATH is set, use it.
736 736 for each item in path, if directory, use files ending in .rc,
737 737 else use item.
738 738 make HGRCPATH empty to only look in .hg/hgrc of current repo.
739 739 if no HGRCPATH, use default os-specific path.'''
740 740 global _rcpath
741 741 if _rcpath is None:
742 742 if 'HGRCPATH' in os.environ:
743 743 _rcpath = []
744 744 for p in os.environ['HGRCPATH'].split(os.pathsep):
745 745 if not p:
746 746 continue
747 747 p = util.expandpath(p)
748 748 if os.path.isdir(p):
749 749 for f, kind in osutil.listdir(p):
750 750 if f.endswith('.rc'):
751 751 _rcpath.append(os.path.join(p, f))
752 752 else:
753 753 _rcpath.append(p)
754 754 else:
755 755 _rcpath = osrcpath()
756 756 return _rcpath
757 757
758 758 def intrev(rev):
759 759 """Return integer for a given revision that can be used in comparison or
760 760 arithmetic operation"""
761 761 if rev is None:
762 762 return wdirrev
763 763 return rev
764 764
765 765 def revsingle(repo, revspec, default='.'):
766 766 if not revspec and revspec != 0:
767 767 return repo[default]
768 768
769 769 l = revrange(repo, [revspec])
770 770 if not l:
771 771 raise error.Abort(_('empty revision set'))
772 772 return repo[l.last()]
773 773
774 774 def _pairspec(revspec):
775 775 tree = revset.parse(revspec)
776 776 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
777 777 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
778 778
779 779 def revpair(repo, revs):
780 780 if not revs:
781 781 return repo.dirstate.p1(), None
782 782
783 783 l = revrange(repo, revs)
784 784
785 785 if not l:
786 786 first = second = None
787 787 elif l.isascending():
788 788 first = l.min()
789 789 second = l.max()
790 790 elif l.isdescending():
791 791 first = l.max()
792 792 second = l.min()
793 793 else:
794 794 first = l.first()
795 795 second = l.last()
796 796
797 797 if first is None:
798 798 raise error.Abort(_('empty revision range'))
799 799 if (first == second and len(revs) >= 2
800 800 and not all(revrange(repo, [r]) for r in revs)):
801 801 raise error.Abort(_('empty revision on one side of range'))
802 802
803 803 # if top-level is range expression, the result must always be a pair
804 804 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
805 805 return repo.lookup(first), None
806 806
807 807 return repo.lookup(first), repo.lookup(second)
808 808
809 809 def revrange(repo, revs):
810 810 """Yield revision as strings from a list of revision specifications."""
811 811 allspecs = []
812 812 for spec in revs:
813 813 if isinstance(spec, int):
814 814 spec = revset.formatspec('rev(%d)', spec)
815 815 allspecs.append(spec)
816 816 m = revset.matchany(repo.ui, allspecs, repo)
817 817 return m(repo)
818 818
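# A usage sketch (hypothetical repo): integer and string specs can be
# mixed freely; integers are rewritten to 'rev(N)' via revset.formatspec
# before being matched:
#
#     revs = revrange(repo, [0, 'tip', 'branch(default)'])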
819 819 def meaningfulparents(repo, ctx):
820 820 """Return list of meaningful (or all if debug) parentrevs for rev.
821 821
822 822 For merges (two non-nullrev revisions) both parents are meaningful.
823 823 Otherwise the first parent revision is considered meaningful if it
824 824 is not the preceding revision.
825 825 """
826 826 parents = ctx.parents()
827 827 if len(parents) > 1:
828 828 return parents
829 829 if repo.ui.debugflag:
830 830 return [parents[0], repo['null']]
831 831 if parents[0].rev() >= intrev(ctx.rev()) - 1:
832 832 return []
833 833 return parents
834 834
835 835 def expandpats(pats):
836 836 '''Expand bare globs when running on windows.
837 837 On posix we assume it has already been done by sh.'''
838 838 if not util.expandglobs:
839 839 return list(pats)
840 840 ret = []
841 841 for kindpat in pats:
842 842 kind, pat = matchmod._patsplit(kindpat, None)
843 843 if kind is None:
844 844 try:
845 845 globbed = glob.glob(pat)
846 846 except re.error:
847 847 globbed = [pat]
848 848 if globbed:
849 849 ret.extend(globbed)
850 850 continue
851 851 ret.append(kindpat)
852 852 return ret
853 853
854 854 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
855 855 badfn=None):
856 856 '''Return a matcher and the patterns that were used.
857 857 The matcher will warn about bad matches, unless an alternate badfn callback
858 858 is provided.'''
859 859 if pats == ("",):
860 860 pats = []
861 861 if opts is None:
862 862 opts = {}
863 863 if not globbed and default == 'relpath':
864 864 pats = expandpats(pats or [])
865 865
866 866 def bad(f, msg):
867 867 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
868 868
869 869 if badfn is None:
870 870 badfn = bad
871 871
872 872 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
873 873 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
874 874
875 875 if m.always():
876 876 pats = []
877 877 return m, pats
878 878
879 879 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
880 880 badfn=None):
881 881 '''Return a matcher that will warn about bad matches.'''
882 882 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
883 883
884 884 def matchall(repo):
885 885 '''Return a matcher that will efficiently match everything.'''
886 886 return matchmod.always(repo.root, repo.getcwd())
887 887
888 888 def matchfiles(repo, files, badfn=None):
889 889 '''Return a matcher that will efficiently match exactly these files.'''
890 890 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
891 891
892 892 def origpath(ui, repo, filepath):
893 893 '''customize where .orig files are created
894 894
895 895 Fetch user defined path from config file: [ui] origbackuppath = <path>
896 896 Fall back to default (filepath) if not specified
897 897 '''
898 898 origbackuppath = ui.config('ui', 'origbackuppath', None)
899 899 if origbackuppath is None:
900 900 return filepath + ".orig"
901 901
902 902 filepathfromroot = os.path.relpath(filepath, start=repo.root)
903 903 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
904 904
905 905 origbackupdir = repo.vfs.dirname(fullorigpath)
906 906 if not repo.vfs.exists(origbackupdir):
907 907 ui.note(_('creating directory: %s\n') % origbackupdir)
908 908 util.makedirs(origbackupdir)
909 909
910 910 return fullorigpath + ".orig"
911 911
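# An illustrative configuration (the path below is only an example):
#
#     [ui]
#     origbackuppath = .hg/origbackups
#
# makes backups land under <repo>/.hg/origbackups/<relative path>.orig
# instead of next to the original file.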
912 912 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
913 913 if opts is None:
914 914 opts = {}
915 915 m = matcher
916 916 if dry_run is None:
917 917 dry_run = opts.get('dry_run')
918 918 if similarity is None:
919 919 similarity = float(opts.get('similarity') or 0)
920 920
921 921 ret = 0
922 922 join = lambda f: os.path.join(prefix, f)
923 923
924 924 def matchessubrepo(matcher, subpath):
925 925 if matcher.exact(subpath):
926 926 return True
927 927 for f in matcher.files():
928 928 if f.startswith(subpath):
929 929 return True
930 930 return False
931 931
932 932 wctx = repo[None]
933 933 for subpath in sorted(wctx.substate):
934 934 if opts.get('subrepos') or matchessubrepo(m, subpath):
935 935 sub = wctx.sub(subpath)
936 936 try:
937 937 submatch = matchmod.subdirmatcher(subpath, m)
938 938 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
939 939 ret = 1
940 940 except error.LookupError:
941 941 repo.ui.status(_("skipping missing subrepository: %s\n")
942 942 % join(subpath))
943 943
944 944 rejected = []
945 945 def badfn(f, msg):
946 946 if f in m.files():
947 947 m.bad(f, msg)
948 948 rejected.append(f)
949 949
950 950 badmatch = matchmod.badmatch(m, badfn)
951 951 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
952 952 badmatch)
953 953
954 954 unknownset = set(unknown + forgotten)
955 955 toprint = unknownset.copy()
956 956 toprint.update(deleted)
957 957 for abs in sorted(toprint):
958 958 if repo.ui.verbose or not m.exact(abs):
959 959 if abs in unknownset:
960 960 status = _('adding %s\n') % m.uipath(abs)
961 961 else:
962 962 status = _('removing %s\n') % m.uipath(abs)
963 963 repo.ui.status(status)
964 964
965 965 renames = _findrenames(repo, m, added + unknown, removed + deleted,
966 966 similarity)
967 967
968 968 if not dry_run:
969 969 _markchanges(repo, unknown + forgotten, deleted, renames)
970 970
971 971 for f in rejected:
972 972 if f in m.files():
973 973 return 1
974 974 return ret
975 975
976 976 def marktouched(repo, files, similarity=0.0):
977 977 '''Assert that files have somehow been operated upon. files are relative to
978 978 the repo root.'''
979 979 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
980 980 rejected = []
981 981
982 982 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
983 983
984 984 if repo.ui.verbose:
985 985 unknownset = set(unknown + forgotten)
986 986 toprint = unknownset.copy()
987 987 toprint.update(deleted)
988 988 for abs in sorted(toprint):
989 989 if abs in unknownset:
990 990 status = _('adding %s\n') % abs
991 991 else:
992 992 status = _('removing %s\n') % abs
993 993 repo.ui.status(status)
994 994
995 995 renames = _findrenames(repo, m, added + unknown, removed + deleted,
996 996 similarity)
997 997
998 998 _markchanges(repo, unknown + forgotten, deleted, renames)
999 999
1000 1000 for f in rejected:
1001 1001 if f in m.files():
1002 1002 return 1
1003 1003 return 0
1004 1004
1005 1005 def _interestingfiles(repo, matcher):
1006 1006 '''Walk dirstate with matcher, looking for files that addremove would care
1007 1007 about.
1008 1008
1009 1009 This is different from dirstate.status because it doesn't care about
1010 1010 whether files are modified or clean.'''
1011 1011 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1012 1012 audit_path = pathutil.pathauditor(repo.root)
1013 1013
1014 1014 ctx = repo[None]
1015 1015 dirstate = repo.dirstate
1016 1016 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1017 1017 full=False)
1018 1018 for abs, st in walkresults.iteritems():
1019 1019 dstate = dirstate[abs]
1020 1020 if dstate == '?' and audit_path.check(abs):
1021 1021 unknown.append(abs)
1022 1022 elif dstate != 'r' and not st:
1023 1023 deleted.append(abs)
1024 1024 elif dstate == 'r' and st:
1025 1025 forgotten.append(abs)
1026 1026 # for finding renames
1027 1027 elif dstate == 'r' and not st:
1028 1028 removed.append(abs)
1029 1029 elif dstate == 'a':
1030 1030 added.append(abs)
1031 1031
1032 1032 return added, unknown, deleted, removed, forgotten
1033 1033
1034 1034 def _findrenames(repo, matcher, added, removed, similarity):
1035 1035 '''Find renames from removed files to added ones.'''
1036 1036 renames = {}
1037 1037 if similarity > 0:
1038 1038 for old, new, score in similar.findrenames(repo, added, removed,
1039 1039 similarity):
1040 1040 if (repo.ui.verbose or not matcher.exact(old)
1041 1041 or not matcher.exact(new)):
1042 1042 repo.ui.status(_('recording removal of %s as rename to %s '
1043 1043 '(%d%% similar)\n') %
1044 1044 (matcher.rel(old), matcher.rel(new),
1045 1045 score * 100))
1046 1046 renames[new] = old
1047 1047 return renames
1048 1048
1049 1049 def _markchanges(repo, unknown, deleted, renames):
1050 1050 '''Marks the files in unknown as added, the files in deleted as removed,
1051 1051 and the files in renames as copied.'''
1052 1052 wctx = repo[None]
1053 1053 with repo.wlock():
1054 1054 wctx.forget(deleted)
1055 1055 wctx.add(unknown)
1056 1056 for new, old in renames.iteritems():
1057 1057 wctx.copy(old, new)
1058 1058
1059 1059 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1060 1060 """Update the dirstate to reflect the intent of copying src to dst. For
1061 1061 different reasons it might not end with dst being marked as copied from src.
1062 1062 """
1063 1063 origsrc = repo.dirstate.copied(src) or src
1064 1064 if dst == origsrc: # copying back a copy?
1065 1065 if repo.dirstate[dst] not in 'mn' and not dryrun:
1066 1066 repo.dirstate.normallookup(dst)
1067 1067 else:
1068 1068 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1069 1069 if not ui.quiet:
1070 1070 ui.warn(_("%s has not been committed yet, so no copy "
1071 1071 "data will be stored for %s.\n")
1072 1072 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1073 1073 if repo.dirstate[dst] in '?r' and not dryrun:
1074 1074 wctx.add([dst])
1075 1075 elif not dryrun:
1076 1076 wctx.copy(origsrc, dst)
1077 1077
1078 1078 def readrequires(opener, supported):
1079 1079 '''Reads and parses .hg/requires and checks if all entries found
1080 1080 are in the list of supported features.'''
1081 1081 requirements = set(opener.read("requires").splitlines())
1082 1082 missings = []
1083 1083 for r in requirements:
1084 1084 if r not in supported:
1085 1085 if not r or not r[0].isalnum():
1086 1086 raise error.RequirementError(_(".hg/requires file is corrupt"))
1087 1087 missings.append(r)
1088 1088 missings.sort()
1089 1089 if missings:
1090 1090 raise error.RequirementError(
1091 1091 _("repository requires features unknown to this Mercurial: %s")
1092 1092 % " ".join(missings),
1093 1093 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1094 1094 " for more information"))
1095 1095 return requirements
1096 1096
1097 1097 def writerequires(opener, requirements):
1098 1098 with opener('requires', 'w') as fp:
1099 1099 for r in sorted(requirements):
1100 1100 fp.write("%s\n" % r)
1101 1101
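# An illustrative .hg/requires, one entry per line (the exact set varies
# with repository format; these match the defaults produced by
# newreporequirements above):
#
#     revlogv1
#     store
#     fncache
#     dotencode
#     generaldelta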
1102 1102 class filecachesubentry(object):
1103 1103 def __init__(self, path, stat):
1104 1104 self.path = path
1105 1105 self.cachestat = None
1106 1106 self._cacheable = None
1107 1107
1108 1108 if stat:
1109 1109 self.cachestat = filecachesubentry.stat(self.path)
1110 1110
1111 1111 if self.cachestat:
1112 1112 self._cacheable = self.cachestat.cacheable()
1113 1113 else:
1114 1114 # None means we don't know yet
1115 1115 self._cacheable = None
1116 1116
1117 1117 def refresh(self):
1118 1118 if self.cacheable():
1119 1119 self.cachestat = filecachesubentry.stat(self.path)
1120 1120
1121 1121 def cacheable(self):
1122 1122 if self._cacheable is not None:
1123 1123 return self._cacheable
1124 1124
1125 1125 # we don't know yet, assume it is for now
1126 1126 return True
1127 1127
1128 1128 def changed(self):
1129 1129 # no point in going further if we can't cache it
1130 1130 if not self.cacheable():
1131 1131 return True
1132 1132
1133 1133 newstat = filecachesubentry.stat(self.path)
1134 1134
1135 1135 # we may not know if it's cacheable yet, check again now
1136 1136 if newstat and self._cacheable is None:
1137 1137 self._cacheable = newstat.cacheable()
1138 1138
1139 1139 # check again
1140 1140 if not self._cacheable:
1141 1141 return True
1142 1142
1143 1143 if self.cachestat != newstat:
1144 1144 self.cachestat = newstat
1145 1145 return True
1146 1146 else:
1147 1147 return False
1148 1148
1149 1149 @staticmethod
1150 1150 def stat(path):
1151 1151 try:
1152 1152 return util.cachestat(path)
1153 1153 except OSError as e:
1154 1154 if e.errno != errno.ENOENT:
1155 1155 raise
1156 1156
1157 1157 class filecacheentry(object):
1158 1158 def __init__(self, paths, stat=True):
1159 1159 self._entries = []
1160 1160 for path in paths:
1161 1161 self._entries.append(filecachesubentry(path, stat))
1162 1162
1163 1163 def changed(self):
1164 1164 '''true if any entry has changed'''
1165 1165 for entry in self._entries:
1166 1166 if entry.changed():
1167 1167 return True
1168 1168 return False
1169 1169
1170 1170 def refresh(self):
1171 1171 for entry in self._entries:
1172 1172 entry.refresh()
1173 1173
1174 1174 class filecache(object):
1175 1175 '''A property-like decorator that tracks files under .hg/ for updates.
1176 1176
1177 1177 When called, records stat info for the tracked files in _filecache.
1178 1178
1179 1179 On subsequent calls, compares old stat info with new info, and recreates
1180 1180 the object when any of the files has changed, updating the new stat info
1181 1181 in _filecache.
1182 1182
1183 1183 Mercurial either atomically renames or appends to files under .hg,
1184 1184 so to ensure the cache is reliable we need the filesystem to be able
1185 1185 to tell us if a file has been replaced. If it can't, we fall back to
1186 1186 recreating the object on every call (essentially the same behavior as
1187 1187 propertycache).
1188 1188
1189 1189 '''
1190 1190 def __init__(self, *paths):
1191 1191 self.paths = paths
1192 1192
1193 1193 def join(self, obj, fname):
1194 1194 """Used to compute the runtime path of a cached file.
1195 1195
1196 1196 Users should subclass filecache and provide their own version of this
1197 1197 function to call the appropriate join function on 'obj' (an instance
1198 1198 of the class whose member function was decorated).
1199 1199 """
1200 1200 return obj.join(fname)
1201 1201
1202 1202 def __call__(self, func):
1203 1203 self.func = func
1204 1204 self.name = func.__name__
1205 1205 return self
1206 1206
1207 1207 def __get__(self, obj, type=None):
1208 # if accessed on the class, return the descriptor itself.
1209 if obj is None:
1210 return self
1208 1211 # do we need to check if the file changed?
1209 1212 if self.name in obj.__dict__:
1210 1213 assert self.name in obj._filecache, self.name
1211 1214 return obj.__dict__[self.name]
1212 1215
1213 1216 entry = obj._filecache.get(self.name)
1214 1217
1215 1218 if entry:
1216 1219 if entry.changed():
1217 1220 entry.obj = self.func(obj)
1218 1221 else:
1219 1222 paths = [self.join(obj, path) for path in self.paths]
1220 1223
1221 1224 # We stat -before- creating the object so our cache doesn't lie if
1222 1225 # a writer modified the file between the time we read and stat it
1223 1226 entry = filecacheentry(paths, True)
1224 1227 entry.obj = self.func(obj)
1225 1228
1226 1229 obj._filecache[self.name] = entry
1227 1230
1228 1231 obj.__dict__[self.name] = entry.obj
1229 1232 return entry.obj
1230 1233
1231 1234 def __set__(self, obj, value):
1232 1235 if self.name not in obj._filecache:
1233 1236 # we add an entry for the missing value because X in __dict__
1234 1237 # implies X in _filecache
1235 1238 paths = [self.join(obj, path) for path in self.paths]
1236 1239 ce = filecacheentry(paths, False)
1237 1240 obj._filecache[self.name] = ce
1238 1241 else:
1239 1242 ce = obj._filecache[self.name]
1240 1243
1241 1244 ce.obj = value # update cached copy
1242 1245 obj.__dict__[self.name] = value # update copy returned by obj.x
1243 1246
1244 1247 def __delete__(self, obj):
1245 1248 try:
1246 1249 del obj.__dict__[self.name]
1247 1250 except KeyError:
1248 1251 raise AttributeError(self.name)
1249 1252
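A minimal usage sketch of the decorator (the class, path, and loader are
hypothetical; in Mercurial the repofilecache/storecache subclasses play this
role on localrepository):

    import os

    class thing(object):
        def __init__(self):
            # X in __dict__ implies X in _filecache, so the dict must exist
            self._filecache = {}
        def join(self, fname):
            return os.path.join('/tmp/repo/.hg', fname)  # hypothetical root
        @filecache('frob')
        def frob(self):
            return loadfrob(self.join('frob'))  # hypothetical loader

    t = thing()
    t.frob  # stats .hg/frob, computes and caches the value
    t.frob  # re-stats; recomputes only if the file changed on disk
    # with the guard added above, accessing the attribute on the class
    # returns the descriptor itself instead of raising:
    assert isinstance(thing.frob, filecache)
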
1250 1253 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1251 1254 if lock is None:
1252 1255 raise error.LockInheritanceContractViolation(
1253 1256 'lock can only be inherited while held')
1254 1257 if environ is None:
1255 1258 environ = {}
1256 1259 with lock.inherit() as locker:
1257 1260 environ[envvar] = locker
1258 1261 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1259 1262
1260 1263 def wlocksub(repo, cmd, *args, **kwargs):
1261 1264 """run cmd as a subprocess that allows inheriting repo's wlock
1262 1265
1263 1266 This can only be called while the wlock is held. This takes all the
1264 1267 arguments that ui.system does, and returns the exit code of the
1265 1268 subprocess."""
1266 1269 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1267 1270 **kwargs)
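A sketch of the intended call pattern (the command is illustrative; any
subprocess that itself needs the wlock qualifies):

    # must be called while holding the wlock; the child process sees
    # HG_WLOCK_LOCKER in its environment and can inherit the lock
    with repo.wlock():
        rc = wlocksub(repo, 'hg debugsomething')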
1268 1271
1269 1272 def gdinitconfig(ui):
1270 1273 """helper function to know if a repo should be created as general delta
1271 1274 """
1272 1275 # experimental config: format.generaldelta
1273 1276 return (ui.configbool('format', 'generaldelta', False)
1274 1277 or ui.configbool('format', 'usegeneraldelta', True))
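For example, repository-creation code can consult this helper when building
the initial requirements set (a sketch; the surrounding logic lives in
localrepo):

    requirements = set(['revlogv1'])
    if gdinitconfig(ui):
        requirements.add('generaldelta')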
1275 1278
1276 1279 def gddeltaconfig(ui):
1277 1280 """helper function to know if incoming delta should be optimised
1278 1281 """
1279 1282 # experimental config: format.generaldelta
1280 1283 return ui.configbool('format', 'generaldelta', False)
1281 1284
1282 1285 class delayclosedfile(object):
1283 1286 """Proxy for a file object whose close is delayed.
1284 1287
1285 1288 Do not instantiate outside of the vfs layer.
1286 1289 """
1287 1290
1288 1291 def __init__(self, fh, closer):
1289 1292 object.__setattr__(self, '_origfh', fh)
1290 1293 object.__setattr__(self, '_closer', closer)
1291 1294
1292 1295 def __getattr__(self, attr):
1293 1296 return getattr(self._origfh, attr)
1294 1297
1295 1298 def __setattr__(self, attr, value):
1296 1299 return setattr(self._origfh, attr, value)
1297 1300
1298 1301 def __delattr__(self, attr):
1299 1302 return delattr(self._origfh, attr)
1300 1303
1301 1304 def __enter__(self):
1302 1305 return self._origfh.__enter__()
1303 1306
1304 1307 def __exit__(self, exc_type, exc_value, exc_tb):
1305 1308 self._closer.close(self._origfh)
1306 1309
1307 1310 def close(self):
1308 1311 self._closer.close(self._origfh)
1309 1312
1310 1313 class backgroundfilecloser(object):
1311 1314 """Coordinates background closing of file handles on multiple threads."""
1312 1315 def __init__(self, ui, expectedcount=-1):
1313 1316 self._running = False
1314 1317 self._entered = False
1315 1318 self._threads = []
1316 1319 self._threadexception = None
1317 1320
1318 1321 # Only Windows/NTFS has slow file closing. So only enable by default
1319 1322 # on that platform. But allow it to be enabled elsewhere for testing.
1320 1323 defaultenabled = os.name == 'nt'
1321 1324 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1322 1325
1323 1326 if not enabled:
1324 1327 return
1325 1328
1326 1329 # There is overhead to starting and stopping the background threads.
1327 1330 # Don't do background processing unless the file count is large enough
1328 1331 # to justify it.
1329 1332 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1330 1333 2048)
1331 1334 # FUTURE dynamically start background threads after minfilecount closes.
1332 1335 # (We don't currently have any callers that don't know their file count)
1333 1336 if expectedcount > 0 and expectedcount < minfilecount:
1334 1337 return
1335 1338
1336 1339 # Windows defaults to a limit of 512 open files. A buffer of 128
1337 1340 # should give us enough headway.
1338 1341 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1339 1342 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1340 1343
1341 1344 ui.debug('starting %d threads for background file closing\n' %
1342 1345 threadcount)
1343 1346
1344 1347 self._queue = util.queue(maxsize=maxqueue)
1345 1348 self._running = True
1346 1349
1347 1350 for i in range(threadcount):
1348 1351 t = threading.Thread(target=self._worker, name='backgroundcloser')
1349 1352 self._threads.append(t)
1350 1353 t.start()
1351 1354
1352 1355 def __enter__(self):
1353 1356 self._entered = True
1354 1357 return self
1355 1358
1356 1359 def __exit__(self, exc_type, exc_value, exc_tb):
1357 1360 self._running = False
1358 1361
1359 1362 # Wait for threads to finish closing so open files don't linger for
1360 1363 # longer than the lifetime of the context manager.
1361 1364 for t in self._threads:
1362 1365 t.join()
1363 1366
1364 1367 def _worker(self):
1365 1368 """Main routine for worker thread."""
1366 1369 while True:
1367 1370 try:
1368 1371 fh = self._queue.get(block=True, timeout=0.100)
1369 1372 # Need to catch the exception or the thread will terminate and
1370 1373 # we could orphan file descriptors.
1371 1374 try:
1372 1375 fh.close()
1373 1376 except Exception as e:
1374 1377 # Stash so can re-raise from main thread later.
1375 1378 self._threadexception = e
1376 1379 except util.empty:
1377 1380 if not self._running:
1378 1381 break
1379 1382
1380 1383 def close(self, fh):
1381 1384 """Schedule a file for closing."""
1382 1385 if not self._entered:
1383 1386 raise error.Abort('can only call close() when context manager '
1384 1387 'active')
1385 1388
1386 1389 # If a background thread encountered an exception, raise now so we fail
1387 1390 # fast. Otherwise we may go on for minutes until the error
1388 1391 # is acted on.
1389 1392 if self._threadexception:
1390 1393 e = self._threadexception
1391 1394 self._threadexception = None
1392 1395 raise e
1393 1396
1394 1397 # If we're not actively running, close synchronously.
1395 1398 if not self._running:
1396 1399 fh.close()
1397 1400 return
1398 1401
1399 1402 self._queue.put(fh, block=True, timeout=None)
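A hedged sketch of the expected usage (ui and paths are assumed; in
Mercurial the vfs layer wires this up via delayclosedfile):

    # file handles are queued to worker threads for closing, so the main
    # thread isn't stalled by slow close() calls (mainly a Windows/NTFS issue)
    with backgroundfilecloser(ui, expectedcount=len(paths)) as bfc:
        for p in paths:
            fh = open(p, 'rb')
            process(fh.read())  # hypothetical consumer
            bfc.close(fh)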