scmutil: improve documentation of revset APIs...
Gregory Szorc
r29417:526b027b default
@@ -1,1972 +1,1978 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
64 64 class repofilecache(scmutil.filecache):
65 65 """All filecache usage on repo are done for logic that should be unfiltered
66 66 """
67 67
68 68 def __get__(self, repo, type=None):
69 69 if repo is None:
70 70 return self
71 71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 72 def __set__(self, repo, value):
73 73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 74 def __delete__(self, repo):
75 75 return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
77 77 class storecache(repofilecache):
78 78 """filecache for files in the store"""
79 79 def join(self, obj, fname):
80 80 return obj.sjoin(fname)
81 81
82 82 class unfilteredpropertycache(util.propertycache):
83 83 """propertycache that apply to unfiltered repo only"""
84 84
85 85 def __get__(self, repo, type=None):
86 86 unfi = repo.unfiltered()
87 87 if unfi is repo:
88 88 return super(unfilteredpropertycache, self).__get__(unfi)
89 89 return getattr(unfi, self.name)
90 90
91 91 class filteredpropertycache(util.propertycache):
92 92 """propertycache that must take filtering in account"""
93 93
94 94 def cachevalue(self, obj, value):
95 95 object.__setattr__(obj, self.name, value)
96 96
97 97
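# A minimal sketch of how these caching decorators are meant to be used;
# the subclass, property name, and tracked file below are hypothetical,
# not part of this module:
#
#     class examplerepo(localrepository):
#         @repofilecache('exampledata')
#         def _exampledata(self):
#             # recomputed only when .hg/exampledata changes on disk, and
#             # always evaluated against the unfiltered repo
#             return self.vfs.tryread('exampledata')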
98 98 def hasunfilteredcache(repo, name):
99 99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 100 return name in vars(repo.unfiltered())
101 101
102 102 def unfilteredmethod(orig):
103 103 """decorate method that always need to be run on unfiltered version"""
104 104 def wrapper(repo, *args, **kwargs):
105 105 return orig(repo.unfiltered(), *args, **kwargs)
106 106 return wrapper
107 107
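# Sketch of the decorator above; ``countallrevs`` is a hypothetical method.
# The wrapper redirects every call to the unfiltered repo, so repoview
# filtering cannot hide revisions from it:
#
#     class examplerepo(localrepository):
#         @unfilteredmethod
#         def countallrevs(self):
#             return len(self.changelog)  # counts hidden revisions too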
108 108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 109 'unbundle'))
110 110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
112 112 class localpeer(peer.peerrepository):
113 113 '''peer for a local repo; reflects only the most recent API'''
114 114
115 115 def __init__(self, repo, caps=moderncaps):
116 116 peer.peerrepository.__init__(self)
117 117 self._repo = repo.filtered('served')
118 118 self.ui = repo.ui
119 119 self._caps = repo._restrictcapabilities(caps)
120 120 self.requirements = repo.requirements
121 121 self.supportedformats = repo.supportedformats
122 122
123 123 def close(self):
124 124 self._repo.close()
125 125
126 126 def _capabilities(self):
127 127 return self._caps
128 128
129 129 def local(self):
130 130 return self._repo
131 131
132 132 def canpush(self):
133 133 return True
134 134
135 135 def url(self):
136 136 return self._repo.url()
137 137
138 138 def lookup(self, key):
139 139 return self._repo.lookup(key)
140 140
141 141 def branchmap(self):
142 142 return self._repo.branchmap()
143 143
144 144 def heads(self):
145 145 return self._repo.heads()
146 146
147 147 def known(self, nodes):
148 148 return self._repo.known(nodes)
149 149
150 150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 151 **kwargs):
152 152 cg = exchange.getbundle(self._repo, source, heads=heads,
153 153 common=common, bundlecaps=bundlecaps, **kwargs)
154 154 if bundlecaps is not None and 'HG20' in bundlecaps:
155 155 # When requesting a bundle2, getbundle returns a stream to make the
156 156 # wire level function happier. We need to build a proper object
157 157 # from it in local peer.
158 158 cg = bundle2.getunbundler(self.ui, cg)
159 159 return cg
160 160
161 161 # TODO We might want to move the next two calls into legacypeer and add
162 162 # unbundle instead.
163 163
164 164 def unbundle(self, cg, heads, url):
165 165 """apply a bundle on a repo
166 166
167 167 This function handles the repo locking itself."""
168 168 try:
169 169 try:
170 170 cg = exchange.readbundle(self.ui, cg, None)
171 171 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
172 172 if util.safehasattr(ret, 'getchunks'):
173 173 # This is a bundle20 object, turn it into an unbundler.
174 174 # This little dance should be dropped eventually when the
175 175 # API is finally improved.
176 176 stream = util.chunkbuffer(ret.getchunks())
177 177 ret = bundle2.getunbundler(self.ui, stream)
178 178 return ret
179 179 except Exception as exc:
180 180 # If the exception contains output salvaged from a bundle2
181 181 # reply, we need to make sure it is printed before continuing
182 182 # to fail. So we build a bundle2 with such output and consume
183 183 # it directly.
184 184 #
185 185 # This is not very elegant but allows a "simple" solution for
186 186 # issue4594
187 187 output = getattr(exc, '_bundle2salvagedoutput', ())
188 188 if output:
189 189 bundler = bundle2.bundle20(self._repo.ui)
190 190 for out in output:
191 191 bundler.addpart(out)
192 192 stream = util.chunkbuffer(bundler.getchunks())
193 193 b = bundle2.getunbundler(self.ui, stream)
194 194 bundle2.processbundle(self._repo, b)
195 195 raise
196 196 except error.PushRaced as exc:
197 197 raise error.ResponseError(_('push failed:'), str(exc))
198 198
199 199 def lock(self):
200 200 return self._repo.lock()
201 201
202 202 def addchangegroup(self, cg, source, url):
203 203 return cg.apply(self._repo, source, url)
204 204
205 205 def pushkey(self, namespace, key, old, new):
206 206 return self._repo.pushkey(namespace, key, old, new)
207 207
208 208 def listkeys(self, namespace):
209 209 return self._repo.listkeys(namespace)
210 210
211 211 def debugwireargs(self, one, two, three=None, four=None, five=None):
212 212 '''used to test argument passing over the wire'''
213 213 return "%s %s %s %s %s" % (one, two, three, four, five)
214 214
215 215 class locallegacypeer(localpeer):
216 216 '''peer extension which implements legacy methods too; used for tests with
217 217 restricted capabilities'''
218 218
219 219 def __init__(self, repo):
220 220 localpeer.__init__(self, repo, caps=legacycaps)
221 221
222 222 def branches(self, nodes):
223 223 return self._repo.branches(nodes)
224 224
225 225 def between(self, pairs):
226 226 return self._repo.between(pairs)
227 227
228 228 def changegroup(self, basenodes, source):
229 229 return changegroup.changegroup(self._repo, basenodes, source)
230 230
231 231 def changegroupsubset(self, bases, heads, source):
232 232 return changegroup.changegroupsubset(self._repo, bases, heads, source)
233 233
234 234 class localrepository(object):
235 235
236 236 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
237 237 'manifestv2'))
238 238 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
239 239 'dotencode'))
240 240 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
241 241 filtername = None
242 242
243 243 # a list of (ui, featureset) functions.
244 244 # only functions defined in module of enabled extensions are invoked
245 245 featuresetupfuncs = set()
246 246
247 247 def __init__(self, baseui, path=None, create=False):
248 248 self.requirements = set()
249 249 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
250 250 self.wopener = self.wvfs
251 251 self.root = self.wvfs.base
252 252 self.path = self.wvfs.join(".hg")
253 253 self.origroot = path
254 254 self.auditor = pathutil.pathauditor(self.root, self._checknested)
255 255 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
256 256 realfs=False)
257 257 self.vfs = scmutil.vfs(self.path)
258 258 self.opener = self.vfs
259 259 self.baseui = baseui
260 260 self.ui = baseui.copy()
261 261 self.ui.copy = baseui.copy # prevent copying repo configuration
262 262 # A list of callbacks to shape the phase if no data were found.
263 263 # Callbacks are in the form: func(repo, roots) --> processed root.
264 264 # This list is to be filled by extensions during repo setup
265 265 self._phasedefaults = []
266 266 try:
267 267 self.ui.readconfig(self.join("hgrc"), self.root)
268 268 extensions.loadall(self.ui)
269 269 except IOError:
270 270 pass
271 271
272 272 if self.featuresetupfuncs:
273 273 self.supported = set(self._basesupported) # use private copy
274 274 extmods = set(m.__name__ for n, m
275 275 in extensions.extensions(self.ui))
276 276 for setupfunc in self.featuresetupfuncs:
277 277 if setupfunc.__module__ in extmods:
278 278 setupfunc(self.ui, self.supported)
279 279 else:
280 280 self.supported = self._basesupported
281 281
282 282 if not self.vfs.isdir():
283 283 if create:
284 284 self.requirements = newreporequirements(self)
285 285
286 286 if not self.wvfs.exists():
287 287 self.wvfs.makedirs()
288 288 self.vfs.makedir(notindexed=True)
289 289
290 290 if 'store' in self.requirements:
291 291 self.vfs.mkdir("store")
292 292
293 293 # create an invalid changelog
294 294 self.vfs.append(
295 295 "00changelog.i",
296 296 '\0\0\0\2' # represents revlogv2
297 297 ' dummy changelog to prevent using the old repo layout'
298 298 )
299 299 else:
300 300 raise error.RepoError(_("repository %s not found") % path)
301 301 elif create:
302 302 raise error.RepoError(_("repository %s already exists") % path)
303 303 else:
304 304 try:
305 305 self.requirements = scmutil.readrequires(
306 306 self.vfs, self.supported)
307 307 except IOError as inst:
308 308 if inst.errno != errno.ENOENT:
309 309 raise
310 310
311 311 self.sharedpath = self.path
312 312 try:
313 313 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
314 314 realpath=True)
315 315 s = vfs.base
316 316 if not vfs.exists():
317 317 raise error.RepoError(
318 318 _('.hg/sharedpath points to nonexistent directory %s') % s)
319 319 self.sharedpath = s
320 320 except IOError as inst:
321 321 if inst.errno != errno.ENOENT:
322 322 raise
323 323
324 324 self.store = store.store(
325 325 self.requirements, self.sharedpath, scmutil.vfs)
326 326 self.spath = self.store.path
327 327 self.svfs = self.store.vfs
328 328 self.sjoin = self.store.join
329 329 self.vfs.createmode = self.store.createmode
330 330 self._applyopenerreqs()
331 331 if create:
332 332 self._writerequirements()
333 333
334 334 self._dirstatevalidatewarned = False
335 335
336 336 self._branchcaches = {}
337 337 self._revbranchcache = None
338 338 self.filterpats = {}
339 339 self._datafilters = {}
340 340 self._transref = self._lockref = self._wlockref = None
341 341
342 342 # A cache for various files under .hg/ that tracks file changes,
343 343 # (used by the filecache decorator)
344 344 #
345 345 # Maps a property name to its util.filecacheentry
346 346 self._filecache = {}
347 347
348 348 # holds sets of revisions to be filtered
349 349 # should be cleared when something might have changed the filter value:
350 350 # - new changesets,
351 351 # - phase change,
352 352 # - new obsolescence marker,
353 353 # - working directory parent change,
354 354 # - bookmark changes
355 355 self.filteredrevcache = {}
356 356
357 357 # generic mapping between names and nodes
358 358 self.names = namespaces.namespaces()
359 359
360 360 def close(self):
361 361 self._writecaches()
362 362
363 363 def _writecaches(self):
364 364 if self._revbranchcache:
365 365 self._revbranchcache.write()
366 366
367 367 def _restrictcapabilities(self, caps):
368 368 if self.ui.configbool('experimental', 'bundle2-advertise', True):
369 369 caps = set(caps)
370 370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
371 371 caps.add('bundle2=' + urlreq.quote(capsblob))
372 372 return caps
373 373
374 374 def _applyopenerreqs(self):
375 375 self.svfs.options = dict((r, 1) for r in self.requirements
376 376 if r in self.openerreqs)
377 377 # experimental config: format.chunkcachesize
378 378 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
379 379 if chunkcachesize is not None:
380 380 self.svfs.options['chunkcachesize'] = chunkcachesize
381 381 # experimental config: format.maxchainlen
382 382 maxchainlen = self.ui.configint('format', 'maxchainlen')
383 383 if maxchainlen is not None:
384 384 self.svfs.options['maxchainlen'] = maxchainlen
385 385 # experimental config: format.manifestcachesize
386 386 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
387 387 if manifestcachesize is not None:
388 388 self.svfs.options['manifestcachesize'] = manifestcachesize
389 389 # experimental config: format.aggressivemergedeltas
390 390 aggressivemergedeltas = self.ui.configbool('format',
391 391 'aggressivemergedeltas', False)
392 392 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
393 393 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
394 394
395 395 def _writerequirements(self):
396 396 scmutil.writerequires(self.vfs, self.requirements)
397 397
398 398 def _checknested(self, path):
399 399 """Determine if path is a legal nested repository."""
400 400 if not path.startswith(self.root):
401 401 return False
402 402 subpath = path[len(self.root) + 1:]
403 403 normsubpath = util.pconvert(subpath)
404 404
405 405 # XXX: Checking against the current working copy is wrong in
406 406 # the sense that it can reject things like
407 407 #
408 408 # $ hg cat -r 10 sub/x.txt
409 409 #
410 410 # if sub/ is no longer a subrepository in the working copy
411 411 # parent revision.
412 412 #
413 413 # However, it can of course also allow things that would have
414 414 # been rejected before, such as the above cat command if sub/
415 415 # is a subrepository now, but was a normal directory before.
416 416 # The old path auditor would have rejected by mistake since it
417 417 # panics when it sees sub/.hg/.
418 418 #
419 419 # All in all, checking against the working copy seems sensible
420 420 # since we want to prevent access to nested repositories on
421 421 # the filesystem *now*.
422 422 ctx = self[None]
423 423 parts = util.splitpath(subpath)
424 424 while parts:
425 425 prefix = '/'.join(parts)
426 426 if prefix in ctx.substate:
427 427 if prefix == normsubpath:
428 428 return True
429 429 else:
430 430 sub = ctx.sub(prefix)
431 431 return sub.checknested(subpath[len(prefix) + 1:])
432 432 else:
433 433 parts.pop()
434 434 return False
435 435
436 436 def peer(self):
437 437 return localpeer(self) # not cached to avoid reference cycle
438 438
439 439 def unfiltered(self):
440 440 """Return unfiltered version of the repository
441 441
442 442 Intended to be overwritten by filtered repo."""
443 443 return self
444 444
445 445 def filtered(self, name):
446 446 """Return a filtered version of a repository"""
447 447 # build a new class with the mixin and the current class
448 448 # (possibly subclass of the repo)
449 449 class proxycls(repoview.repoview, self.unfiltered().__class__):
450 450 pass
451 451 return proxycls(self, name)
452 452
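# Sketch of switching between repo views, assuming ``repo`` is a loaded
# localrepository; 'visible' and 'served' are filter names provided by
# repoview:
#
#     visible = repo.filtered('visible')  # hides obsolete/hidden csets
#     served = repo.filtered('served')    # additionally hides secret csets
#     assert visible.unfiltered() is repo.unfiltered()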
453 453 @repofilecache('bookmarks', 'bookmarks.current')
454 454 def _bookmarks(self):
455 455 return bookmarks.bmstore(self)
456 456
457 457 @property
458 458 def _activebookmark(self):
459 459 return self._bookmarks.active
460 460
461 461 def bookmarkheads(self, bookmark):
462 462 name = bookmark.split('@', 1)[0]
463 463 heads = []
464 464 for mark, n in self._bookmarks.iteritems():
465 465 if mark.split('@', 1)[0] == name:
466 466 heads.append(n)
467 467 return heads
468 468
469 469 # _phaserevs and _phasesets depend on changelog. what we need is to
470 470 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
471 471 # can't be easily expressed in filecache mechanism.
472 472 @storecache('phaseroots', '00changelog.i')
473 473 def _phasecache(self):
474 474 return phases.phasecache(self, self._phasedefaults)
475 475
476 476 @storecache('obsstore')
477 477 def obsstore(self):
478 478 # read default format for new obsstore.
479 479 # developer config: format.obsstore-version
480 480 defaultformat = self.ui.configint('format', 'obsstore-version', None)
481 481 # rely on obsstore class default when possible.
482 482 kwargs = {}
483 483 if defaultformat is not None:
484 484 kwargs['defaultformat'] = defaultformat
485 485 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
486 486 store = obsolete.obsstore(self.svfs, readonly=readonly,
487 487 **kwargs)
488 488 if store and readonly:
489 489 self.ui.warn(
490 490 _('obsolete feature not enabled but %i markers found!\n')
491 491 % len(list(store)))
492 492 return store
493 493
494 494 @storecache('00changelog.i')
495 495 def changelog(self):
496 496 c = changelog.changelog(self.svfs)
497 497 if 'HG_PENDING' in os.environ:
498 498 p = os.environ['HG_PENDING']
499 499 if p.startswith(self.root):
500 500 c.readpending('00changelog.i.a')
501 501 return c
502 502
503 503 @storecache('00manifest.i')
504 504 def manifest(self):
505 505 return manifest.manifest(self.svfs)
506 506
507 507 def dirlog(self, dir):
508 508 return self.manifest.dirlog(dir)
509 509
510 510 @repofilecache('dirstate')
511 511 def dirstate(self):
512 512 return dirstate.dirstate(self.vfs, self.ui, self.root,
513 513 self._dirstatevalidate)
514 514
515 515 def _dirstatevalidate(self, node):
516 516 try:
517 517 self.changelog.rev(node)
518 518 return node
519 519 except error.LookupError:
520 520 if not self._dirstatevalidatewarned:
521 521 self._dirstatevalidatewarned = True
522 522 self.ui.warn(_("warning: ignoring unknown"
523 523 " working parent %s!\n") % short(node))
524 524 return nullid
525 525
526 526 def __getitem__(self, changeid):
527 527 if changeid is None or changeid == wdirrev:
528 528 return context.workingctx(self)
529 529 if isinstance(changeid, slice):
530 530 return [context.changectx(self, i)
531 531 for i in xrange(*changeid.indices(len(self)))
532 532 if i not in self.changelog.filteredrevs]
533 533 return context.changectx(self, changeid)
534 534
535 535 def __contains__(self, changeid):
536 536 try:
537 537 self[changeid]
538 538 return True
539 539 except error.RepoLookupError:
540 540 return False
541 541
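# Sketch of the mapping protocol implemented above, assuming ``repo`` is
# a loaded localrepository:
#
#     ctx = repo['tip']    # changectx from a rev number, node, or name
#     wctx = repo[None]    # workingctx for the working directory
#     if '1.0' in repo:    # __contains__ swallows RepoLookupError
#         print len(repo)  # number of revisions in the changelog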
542 542 def __nonzero__(self):
543 543 return True
544 544
545 545 def __len__(self):
546 546 return len(self.changelog)
547 547
548 548 def __iter__(self):
549 549 return iter(self.changelog)
550 550
551 551 def revs(self, expr, *args):
552 552 '''Find revisions matching a revset.
553 553
554 554 The revset is specified as a string ``expr`` that may contain
555 555 %-formatting to escape certain types. See ``revset.formatspec``.
556 556
557 Return a revset.abstractsmartset, which is a list-like interface
557 Revset aliases from the configuration are not expanded. To expand
558 user aliases, consider calling ``scmutil.revrange()``.
559
560 Returns a revset.abstractsmartset, which is a list-like interface
558 561 that contains integer revisions.
559 562 '''
560 563 expr = revset.formatspec(expr, *args)
561 564 m = revset.match(None, expr)
562 565 return m(self)
563 566
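# Sketch of revs() with %-formatting, assuming ``repo`` is a loaded
# localrepository (see revset.formatspec for the escape characters):
#
#     for rev in repo.revs('heads(%d::)', 0):
#         pass  # integer revisions from a lazily evaluated smartset
#     s = repo.revs('branch(%s) and not obsolete()', 'default')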
564 567 def set(self, expr, *args):
565 568 '''Find revisions matching a revset and emit changectx instances.
566 569
567 570 This is a convenience wrapper around ``revs()`` that iterates the
568 571 result and is a generator of changectx instances.
572
573 Revset aliases from the configuration are not expanded. To expand
574 user aliases, consider calling ``scmutil.revrange()``.
569 575 '''
570 576 for r in self.revs(expr, *args):
571 577 yield self[r]
572 578
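# Sketch contrasting the two APIs named in the docstrings above, assuming
# ``specs`` is a list of user-supplied revset strings:
#
#     revs = scmutil.revrange(repo, specs)  # expands [revsetalias] entries
#     for ctx in repo.set('%ld and public()', list(revs)):
#         print ctx.hex()  # each result is a changectx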
573 579 def url(self):
574 580 return 'file:' + self.root
575 581
576 582 def hook(self, name, throw=False, **args):
577 583 """Call a hook, passing this repo instance.
578 584
579 585 This a convenience method to aid invoking hooks. Extensions likely
580 586 won't call this unless they have registered a custom hook or are
581 587 replacing code that is expected to call a hook.
582 588 """
583 589 return hook.hook(self.ui, self, name, throw, **args)
584 590
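# Sketch of firing a hook configured in the user's hgrc under [hooks];
# 'myhook' is a hypothetical hook name, and the keyword arguments become
# HG_* environment variables for shell hooks:
#
#     repo.hook('myhook', throw=False, node=hex(somenode), source='example')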
585 591 @unfilteredmethod
586 592 def _tag(self, names, node, message, local, user, date, extra=None,
587 593 editor=False):
588 594 if isinstance(names, str):
589 595 names = (names,)
590 596
591 597 branches = self.branchmap()
592 598 for name in names:
593 599 self.hook('pretag', throw=True, node=hex(node), tag=name,
594 600 local=local)
595 601 if name in branches:
596 602 self.ui.warn(_("warning: tag %s conflicts with existing"
597 603 " branch name\n") % name)
598 604
599 605 def writetags(fp, names, munge, prevtags):
600 606 fp.seek(0, 2)
601 607 if prevtags and prevtags[-1] != '\n':
602 608 fp.write('\n')
603 609 for name in names:
604 610 if munge:
605 611 m = munge(name)
606 612 else:
607 613 m = name
608 614
609 615 if (self._tagscache.tagtypes and
610 616 name in self._tagscache.tagtypes):
611 617 old = self.tags().get(name, nullid)
612 618 fp.write('%s %s\n' % (hex(old), m))
613 619 fp.write('%s %s\n' % (hex(node), m))
614 620 fp.close()
615 621
616 622 prevtags = ''
617 623 if local:
618 624 try:
619 625 fp = self.vfs('localtags', 'r+')
620 626 except IOError:
621 627 fp = self.vfs('localtags', 'a')
622 628 else:
623 629 prevtags = fp.read()
624 630
625 631 # local tags are stored in the current charset
626 632 writetags(fp, names, None, prevtags)
627 633 for name in names:
628 634 self.hook('tag', node=hex(node), tag=name, local=local)
629 635 return
630 636
631 637 try:
632 638 fp = self.wfile('.hgtags', 'rb+')
633 639 except IOError as e:
634 640 if e.errno != errno.ENOENT:
635 641 raise
636 642 fp = self.wfile('.hgtags', 'ab')
637 643 else:
638 644 prevtags = fp.read()
639 645
640 646 # committed tags are stored in UTF-8
641 647 writetags(fp, names, encoding.fromlocal, prevtags)
642 648
643 649 fp.close()
644 650
645 651 self.invalidatecaches()
646 652
647 653 if '.hgtags' not in self.dirstate:
648 654 self[None].add(['.hgtags'])
649 655
650 656 m = matchmod.exact(self.root, '', ['.hgtags'])
651 657 tagnode = self.commit(message, user, date, extra=extra, match=m,
652 658 editor=editor)
653 659
654 660 for name in names:
655 661 self.hook('tag', node=hex(node), tag=name, local=local)
656 662
657 663 return tagnode
658 664
659 665 def tag(self, names, node, message, local, user, date, editor=False):
660 666 '''tag a revision with one or more symbolic names.
661 667
662 668 names is a list of strings or, when adding a single tag, names may be a
663 669 string.
664 670
665 671 if local is True, the tags are stored in a per-repository file.
666 672 otherwise, they are stored in the .hgtags file, and a new
667 673 changeset is committed with the change.
668 674
669 675 keyword arguments:
670 676
671 677 local: whether to store tags in non-version-controlled file
672 678 (default False)
673 679
674 680 message: commit message to use if committing
675 681
676 682 user: name of user to use if committing
677 683
678 684 date: date tuple to use if committing'''
679 685
680 686 if not local:
681 687 m = matchmod.exact(self.root, '', ['.hgtags'])
682 688 if any(self.status(match=m, unknown=True, ignored=True)):
683 689 raise error.Abort(_('working copy of .hgtags is changed'),
684 690 hint=_('please commit .hgtags manually'))
685 691
686 692 self.tags() # instantiate the cache
687 693 self._tag(names, node, message, local, user, date, editor=editor)
688 694
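# Sketch of the tagging API above, assuming ``repo`` is a loaded
# localrepository and ``node`` is the changeset being tagged:
#
#     repo.tag(['v1.0'], node, 'Added tag v1.0', local=False,
#              user='Example <user@example.com>', date=None)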
689 695 @filteredpropertycache
690 696 def _tagscache(self):
691 697 '''Returns a tagscache object that contains various tags related
692 698 caches.'''
693 699
694 700 # This simplifies its cache management by having one decorated
695 701 # function (this one) and the rest simply fetch things from it.
696 702 class tagscache(object):
697 703 def __init__(self):
698 704 # These two define the set of tags for this repository. tags
699 705 # maps tag name to node; tagtypes maps tag name to 'global' or
700 706 # 'local'. (Global tags are defined by .hgtags across all
701 707 # heads, and local tags are defined in .hg/localtags.)
702 708 # They constitute the in-memory cache of tags.
703 709 self.tags = self.tagtypes = None
704 710
705 711 self.nodetagscache = self.tagslist = None
706 712
707 713 cache = tagscache()
708 714 cache.tags, cache.tagtypes = self._findtags()
709 715
710 716 return cache
711 717
712 718 def tags(self):
713 719 '''return a mapping of tag to node'''
714 720 t = {}
715 721 if self.changelog.filteredrevs:
716 722 tags, tt = self._findtags()
717 723 else:
718 724 tags = self._tagscache.tags
719 725 for k, v in tags.iteritems():
720 726 try:
721 727 # ignore tags to unknown nodes
722 728 self.changelog.rev(v)
723 729 t[k] = v
724 730 except (error.LookupError, ValueError):
725 731 pass
726 732 return t
727 733
728 734 def _findtags(self):
729 735 '''Do the hard work of finding tags. Return a pair of dicts
730 736 (tags, tagtypes) where tags maps tag name to node, and tagtypes
731 737 maps tag name to a string like \'global\' or \'local\'.
732 738 Subclasses or extensions are free to add their own tags, but
733 739 should be aware that the returned dicts will be retained for the
734 740 duration of the localrepo object.'''
735 741
736 742 # XXX what tagtype should subclasses/extensions use? Currently
737 743 # mq and bookmarks add tags, but do not set the tagtype at all.
738 744 # Should each extension invent its own tag type? Should there
739 745 # be one tagtype for all such "virtual" tags? Or is the status
740 746 # quo fine?
741 747
742 748 alltags = {} # map tag name to (node, hist)
743 749 tagtypes = {}
744 750
745 751 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
746 752 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
747 753
748 754 # Build the return dicts. Have to re-encode tag names because
749 755 # the tags module always uses UTF-8 (in order not to lose info
750 756 # writing to the cache), but the rest of Mercurial wants them in
751 757 # local encoding.
752 758 tags = {}
753 759 for (name, (node, hist)) in alltags.iteritems():
754 760 if node != nullid:
755 761 tags[encoding.tolocal(name)] = node
756 762 tags['tip'] = self.changelog.tip()
757 763 tagtypes = dict([(encoding.tolocal(name), value)
758 764 for (name, value) in tagtypes.iteritems()])
759 765 return (tags, tagtypes)
760 766
761 767 def tagtype(self, tagname):
762 768 '''
763 769 return the type of the given tag. result can be:
764 770
765 771 'local' : a local tag
766 772 'global' : a global tag
767 773 None : tag does not exist
768 774 '''
769 775
770 776 return self._tagscache.tagtypes.get(tagname)
771 777
772 778 def tagslist(self):
773 779 '''return a list of tags ordered by revision'''
774 780 if not self._tagscache.tagslist:
775 781 l = []
776 782 for t, n in self.tags().iteritems():
777 783 l.append((self.changelog.rev(n), t, n))
778 784 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
779 785
780 786 return self._tagscache.tagslist
781 787
782 788 def nodetags(self, node):
783 789 '''return the tags associated with a node'''
784 790 if not self._tagscache.nodetagscache:
785 791 nodetagscache = {}
786 792 for t, n in self._tagscache.tags.iteritems():
787 793 nodetagscache.setdefault(n, []).append(t)
788 794 for tags in nodetagscache.itervalues():
789 795 tags.sort()
790 796 self._tagscache.nodetagscache = nodetagscache
791 797 return self._tagscache.nodetagscache.get(node, [])
792 798
793 799 def nodebookmarks(self, node):
794 800 """return the list of bookmarks pointing to the specified node"""
795 801 marks = []
796 802 for bookmark, n in self._bookmarks.iteritems():
797 803 if n == node:
798 804 marks.append(bookmark)
799 805 return sorted(marks)
800 806
801 807 def branchmap(self):
802 808 '''returns a dictionary {branch: [branchheads]} with branchheads
803 809 ordered by increasing revision number'''
804 810 branchmap.updatecache(self)
805 811 return self._branchcaches[self.filtername]
806 812
807 813 @unfilteredmethod
808 814 def revbranchcache(self):
809 815 if not self._revbranchcache:
810 816 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
811 817 return self._revbranchcache
812 818
813 819 def branchtip(self, branch, ignoremissing=False):
814 820 '''return the tip node for a given branch
815 821
816 822 If ignoremissing is True, then this method will not raise an error.
817 823 This is helpful for callers that only expect None for a missing branch
818 824 (e.g. namespace).
819 825
820 826 '''
821 827 try:
822 828 return self.branchmap().branchtip(branch)
823 829 except KeyError:
824 830 if not ignoremissing:
825 831 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
826 832 else:
827 833 pass
828 834
829 835 def lookup(self, key):
830 836 return self[key].node()
831 837
832 838 def lookupbranch(self, key, remote=None):
833 839 repo = remote or self
834 840 if key in repo.branchmap():
835 841 return key
836 842
837 843 repo = (remote and remote.local()) and remote or self
838 844 return repo[key].branch()
839 845
840 846 def known(self, nodes):
841 847 cl = self.changelog
842 848 nm = cl.nodemap
843 849 filtered = cl.filteredrevs
844 850 result = []
845 851 for n in nodes:
846 852 r = nm.get(n)
847 853 resp = not (r is None or r in filtered)
848 854 result.append(resp)
849 855 return result
850 856
851 857 def local(self):
852 858 return self
853 859
854 860 def publishing(self):
855 861 # it's safe (and desirable) to trust the publish flag unconditionally
856 862 # so that we don't finalize changes shared between users via ssh or nfs
857 863 return self.ui.configbool('phases', 'publish', True, untrusted=True)
858 864
859 865 def cancopy(self):
860 866 # so statichttprepo's override of local() works
861 867 if not self.local():
862 868 return False
863 869 if not self.publishing():
864 870 return True
865 871 # if publishing we can't copy if there is filtered content
866 872 return not self.filtered('visible').changelog.filteredrevs
867 873
868 874 def shared(self):
869 875 '''the type of shared repository (None if not shared)'''
870 876 if self.sharedpath != self.path:
871 877 return 'store'
872 878 return None
873 879
874 880 def join(self, f, *insidef):
875 881 return self.vfs.join(os.path.join(f, *insidef))
876 882
877 883 def wjoin(self, f, *insidef):
878 884 return self.vfs.reljoin(self.root, f, *insidef)
879 885
880 886 def file(self, f):
881 887 if f[0] == '/':
882 888 f = f[1:]
883 889 return filelog.filelog(self.svfs, f)
884 890
885 891 def changectx(self, changeid):
886 892 return self[changeid]
887 893
888 894 def setparents(self, p1, p2=nullid):
889 895 self.dirstate.beginparentchange()
890 896 copies = self.dirstate.setparents(p1, p2)
891 897 pctx = self[p1]
892 898 if copies:
893 899 # Adjust copy records, the dirstate cannot do it, it
894 900 # requires access to parents manifests. Preserve them
895 901 # only for entries added to first parent.
896 902 for f in copies:
897 903 if f not in pctx and copies[f] in pctx:
898 904 self.dirstate.copy(copies[f], f)
899 905 if p2 == nullid:
900 906 for f, s in sorted(self.dirstate.copies().items()):
901 907 if f not in pctx and s not in pctx:
902 908 self.dirstate.copy(None, f)
903 909 self.dirstate.endparentchange()
904 910
905 911 def filectx(self, path, changeid=None, fileid=None):
906 912 """changeid can be a changeset revision, node, or tag.
907 913 fileid can be a file revision or node."""
908 914 return context.filectx(self, path, changeid, fileid)
909 915
910 916 def getcwd(self):
911 917 return self.dirstate.getcwd()
912 918
913 919 def pathto(self, f, cwd=None):
914 920 return self.dirstate.pathto(f, cwd)
915 921
916 922 def wfile(self, f, mode='r'):
917 923 return self.wvfs(f, mode)
918 924
919 925 def _link(self, f):
920 926 return self.wvfs.islink(f)
921 927
922 928 def _loadfilter(self, filter):
923 929 if filter not in self.filterpats:
924 930 l = []
925 931 for pat, cmd in self.ui.configitems(filter):
926 932 if cmd == '!':
927 933 continue
928 934 mf = matchmod.match(self.root, '', [pat])
929 935 fn = None
930 936 params = cmd
931 937 for name, filterfn in self._datafilters.iteritems():
932 938 if cmd.startswith(name):
933 939 fn = filterfn
934 940 params = cmd[len(name):].lstrip()
935 941 break
936 942 if not fn:
937 943 fn = lambda s, c, **kwargs: util.filter(s, c)
938 944 # Wrap old filters not supporting keyword arguments
939 945 if not inspect.getargspec(fn)[2]:
940 946 oldfn = fn
941 947 fn = lambda s, c, **kwargs: oldfn(s, c)
942 948 l.append((mf, fn, params))
943 949 self.filterpats[filter] = l
944 950 return self.filterpats[filter]
945 951
946 952 def _filter(self, filterpats, filename, data):
947 953 for mf, fn, cmd in filterpats:
948 954 if mf(filename):
949 955 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
950 956 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
951 957 break
952 958
953 959 return data
954 960
955 961 @unfilteredpropertycache
956 962 def _encodefilterpats(self):
957 963 return self._loadfilter('encode')
958 964
959 965 @unfilteredpropertycache
960 966 def _decodefilterpats(self):
961 967 return self._loadfilter('decode')
962 968
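# The filter patterns come from the [encode] and [decode] hgrc sections;
# a sketch along the lines of the gzip example in the hgrc documentation:
#
#     [encode]
#     *.gz = pipe: gunzip      # applied by wread() when reading files in
#     [decode]
#     *.gz = gzip              # applied by wwrite() toward the working dir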
963 969 def adddatafilter(self, name, filter):
964 970 self._datafilters[name] = filter
965 971
966 972 def wread(self, filename):
967 973 if self._link(filename):
968 974 data = self.wvfs.readlink(filename)
969 975 else:
970 976 data = self.wvfs.read(filename)
971 977 return self._filter(self._encodefilterpats, filename, data)
972 978
973 979 def wwrite(self, filename, data, flags, backgroundclose=False):
974 980 """write ``data`` into ``filename`` in the working directory
975 981
976 982 This returns the length of the written (possibly decoded) data.
977 983 """
978 984 data = self._filter(self._decodefilterpats, filename, data)
979 985 if 'l' in flags:
980 986 self.wvfs.symlink(data, filename)
981 987 else:
982 988 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
983 989 if 'x' in flags:
984 990 self.wvfs.setflags(filename, False, True)
985 991 return len(data)
986 992
987 993 def wwritedata(self, filename, data):
988 994 return self._filter(self._decodefilterpats, filename, data)
989 995
990 996 def currenttransaction(self):
991 997 """return the current transaction or None if non exists"""
992 998 if self._transref:
993 999 tr = self._transref()
994 1000 else:
995 1001 tr = None
996 1002
997 1003 if tr and tr.running():
998 1004 return tr
999 1005 return None
1000 1006
1001 1007 def transaction(self, desc, report=None):
1002 1008 if (self.ui.configbool('devel', 'all-warnings')
1003 1009 or self.ui.configbool('devel', 'check-locks')):
1004 1010 l = self._lockref and self._lockref()
1005 1011 if l is None or not l.held:
1006 1012 raise RuntimeError('programming error: transaction requires '
1007 1013 'locking')
1008 1014 tr = self.currenttransaction()
1009 1015 if tr is not None:
1010 1016 return tr.nest()
1011 1017
1012 1018 # abort here if the journal already exists
1013 1019 if self.svfs.exists("journal"):
1014 1020 raise error.RepoError(
1015 1021 _("abandoned transaction found"),
1016 1022 hint=_("run 'hg recover' to clean up transaction"))
1017 1023
1018 1024 idbase = "%.40f#%f" % (random.random(), time.time())
1019 1025 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1020 1026 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1021 1027
1022 1028 self._writejournal(desc)
1023 1029 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1024 1030 if report:
1025 1031 rp = report
1026 1032 else:
1027 1033 rp = self.ui.warn
1028 1034 vfsmap = {'plain': self.vfs} # root of .hg/
1029 1035 # we must avoid cyclic reference between repo and transaction.
1030 1036 reporef = weakref.ref(self)
1031 1037 def validate(tr):
1032 1038 """will run pre-closing hooks"""
1033 1039 reporef().hook('pretxnclose', throw=True,
1034 1040 txnname=desc, **tr.hookargs)
1035 1041 def releasefn(tr, success):
1036 1042 repo = reporef()
1037 1043 if success:
1038 1044 # this should be explicitly invoked here, because
1039 1045 # in-memory changes aren't written out at closing
1040 1046 # transaction, if tr.addfilegenerator (via
1041 1047 # dirstate.write or so) isn't invoked while
1042 1048 # transaction running
1043 1049 repo.dirstate.write(None)
1044 1050 else:
1045 1051 # discard all changes (including ones already written
1046 1052 # out) in this transaction
1047 1053 repo.dirstate.restorebackup(None, prefix='journal.')
1048 1054
1049 1055 repo.invalidate(clearfilecache=True)
1050 1056
1051 1057 tr = transaction.transaction(rp, self.svfs, vfsmap,
1052 1058 "journal",
1053 1059 "undo",
1054 1060 aftertrans(renames),
1055 1061 self.store.createmode,
1056 1062 validator=validate,
1057 1063 releasefn=releasefn)
1058 1064
1059 1065 tr.hookargs['txnid'] = txnid
1060 1066 # note: writing the fncache only during finalize means that the file is
1061 1067 # outdated when running hooks. As fncache is used for streaming clone,
1062 1068 # this is not expected to break anything that happens during the hooks.
1063 1069 tr.addfinalize('flush-fncache', self.store.write)
1064 1070 def txnclosehook(tr2):
1065 1071 """To be run if transaction is successful, will schedule a hook run
1066 1072 """
1067 1073 # Don't reference tr2 in hook() so we don't hold a reference.
1068 1074 # This reduces memory consumption when there are multiple
1069 1075 # transactions per lock. This can likely go away if issue5045
1070 1076 # fixes the function accumulation.
1071 1077 hookargs = tr2.hookargs
1072 1078
1073 1079 def hook():
1074 1080 reporef().hook('txnclose', throw=False, txnname=desc,
1075 1081 **hookargs)
1076 1082 reporef()._afterlock(hook)
1077 1083 tr.addfinalize('txnclose-hook', txnclosehook)
1078 1084 def txnaborthook(tr2):
1079 1085 """To be run if transaction is aborted
1080 1086 """
1081 1087 reporef().hook('txnabort', throw=False, txnname=desc,
1082 1088 **tr2.hookargs)
1083 1089 tr.addabort('txnabort-hook', txnaborthook)
1084 1090 # avoid eager cache invalidation. in-memory data should be identical
1085 1091 # to stored data if transaction has no error.
1086 1092 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1087 1093 self._transref = weakref.ref(tr)
1088 1094 return tr
1089 1095
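# Sketch of the expected calling convention, assuming ``repo`` is a loaded
# localrepository; the store lock must be held before opening a
# transaction (enforced by the devel check at the top of transaction()):
#
#     with repo.lock():
#         tr = repo.transaction('example-operation')
#         try:
#             pass          # write to the store
#             tr.close()    # commit the transaction
#         finally:
#             tr.release()  # aborts if close() was never reached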
1090 1096 def _journalfiles(self):
1091 1097 return ((self.svfs, 'journal'),
1092 1098 (self.vfs, 'journal.dirstate'),
1093 1099 (self.vfs, 'journal.branch'),
1094 1100 (self.vfs, 'journal.desc'),
1095 1101 (self.vfs, 'journal.bookmarks'),
1096 1102 (self.svfs, 'journal.phaseroots'))
1097 1103
1098 1104 def undofiles(self):
1099 1105 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1100 1106
1101 1107 def _writejournal(self, desc):
1102 1108 self.dirstate.savebackup(None, prefix='journal.')
1103 1109 self.vfs.write("journal.branch",
1104 1110 encoding.fromlocal(self.dirstate.branch()))
1105 1111 self.vfs.write("journal.desc",
1106 1112 "%d\n%s\n" % (len(self), desc))
1107 1113 self.vfs.write("journal.bookmarks",
1108 1114 self.vfs.tryread("bookmarks"))
1109 1115 self.svfs.write("journal.phaseroots",
1110 1116 self.svfs.tryread("phaseroots"))
1111 1117
1112 1118 def recover(self):
1113 1119 with self.lock():
1114 1120 if self.svfs.exists("journal"):
1115 1121 self.ui.status(_("rolling back interrupted transaction\n"))
1116 1122 vfsmap = {'': self.svfs,
1117 1123 'plain': self.vfs,}
1118 1124 transaction.rollback(self.svfs, vfsmap, "journal",
1119 1125 self.ui.warn)
1120 1126 self.invalidate()
1121 1127 return True
1122 1128 else:
1123 1129 self.ui.warn(_("no interrupted transaction available\n"))
1124 1130 return False
1125 1131
1126 1132 def rollback(self, dryrun=False, force=False):
1127 1133 wlock = lock = dsguard = None
1128 1134 try:
1129 1135 wlock = self.wlock()
1130 1136 lock = self.lock()
1131 1137 if self.svfs.exists("undo"):
1132 1138 dsguard = cmdutil.dirstateguard(self, 'rollback')
1133 1139
1134 1140 return self._rollback(dryrun, force, dsguard)
1135 1141 else:
1136 1142 self.ui.warn(_("no rollback information available\n"))
1137 1143 return 1
1138 1144 finally:
1139 1145 release(dsguard, lock, wlock)
1140 1146
1141 1147 @unfilteredmethod # Until we get smarter cache management
1142 1148 def _rollback(self, dryrun, force, dsguard):
1143 1149 ui = self.ui
1144 1150 try:
1145 1151 args = self.vfs.read('undo.desc').splitlines()
1146 1152 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1147 1153 if len(args) >= 3:
1148 1154 detail = args[2]
1149 1155 oldtip = oldlen - 1
1150 1156
1151 1157 if detail and ui.verbose:
1152 1158 msg = (_('repository tip rolled back to revision %s'
1153 1159 ' (undo %s: %s)\n')
1154 1160 % (oldtip, desc, detail))
1155 1161 else:
1156 1162 msg = (_('repository tip rolled back to revision %s'
1157 1163 ' (undo %s)\n')
1158 1164 % (oldtip, desc))
1159 1165 except IOError:
1160 1166 msg = _('rolling back unknown transaction\n')
1161 1167 desc = None
1162 1168
1163 1169 if not force and self['.'] != self['tip'] and desc == 'commit':
1164 1170 raise error.Abort(
1165 1171 _('rollback of last commit while not checked out '
1166 1172 'may lose data'), hint=_('use -f to force'))
1167 1173
1168 1174 ui.status(msg)
1169 1175 if dryrun:
1170 1176 return 0
1171 1177
1172 1178 parents = self.dirstate.parents()
1173 1179 self.destroying()
1174 1180 vfsmap = {'plain': self.vfs, '': self.svfs}
1175 1181 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1176 1182 if self.vfs.exists('undo.bookmarks'):
1177 1183 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1178 1184 if self.svfs.exists('undo.phaseroots'):
1179 1185 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1180 1186 self.invalidate()
1181 1187
1182 1188 parentgone = (parents[0] not in self.changelog.nodemap or
1183 1189 parents[1] not in self.changelog.nodemap)
1184 1190 if parentgone:
1185 1191 # prevent dirstateguard from overwriting already restored one
1186 1192 dsguard.close()
1187 1193
1188 1194 self.dirstate.restorebackup(None, prefix='undo.')
1189 1195 try:
1190 1196 branch = self.vfs.read('undo.branch')
1191 1197 self.dirstate.setbranch(encoding.tolocal(branch))
1192 1198 except IOError:
1193 1199 ui.warn(_('named branch could not be reset: '
1194 1200 'current branch is still \'%s\'\n')
1195 1201 % self.dirstate.branch())
1196 1202
1197 1203 parents = tuple([p.rev() for p in self[None].parents()])
1198 1204 if len(parents) > 1:
1199 1205 ui.status(_('working directory now based on '
1200 1206 'revisions %d and %d\n') % parents)
1201 1207 else:
1202 1208 ui.status(_('working directory now based on '
1203 1209 'revision %d\n') % parents)
1204 1210 mergemod.mergestate.clean(self, self['.'].node())
1205 1211
1206 1212 # TODO: if we know which new heads may result from this rollback, pass
1207 1213 # them to destroy(), which will prevent the branchhead cache from being
1208 1214 # invalidated.
1209 1215 self.destroyed()
1210 1216 return 0
1211 1217
1212 1218 def invalidatecaches(self):
1213 1219
1214 1220 if '_tagscache' in vars(self):
1215 1221 # can't use delattr on proxy
1216 1222 del self.__dict__['_tagscache']
1217 1223
1218 1224 self.unfiltered()._branchcaches.clear()
1219 1225 self.invalidatevolatilesets()
1220 1226
1221 1227 def invalidatevolatilesets(self):
1222 1228 self.filteredrevcache.clear()
1223 1229 obsolete.clearobscaches(self)
1224 1230
1225 1231 def invalidatedirstate(self):
1226 1232 '''Invalidates the dirstate, causing the next call to dirstate
1227 1233 to check if it was modified since the last time it was read,
1228 1234 rereading it if it has.
1229 1235
1230 1236 This is different from dirstate.invalidate() in that it doesn't
1231 1237 always reread the dirstate. Use dirstate.invalidate() if you want to
1232 1238 explicitly read the dirstate again (i.e. restoring it to a previous
1233 1239 known good state).'''
1234 1240 if hasunfilteredcache(self, 'dirstate'):
1235 1241 for k in self.dirstate._filecache:
1236 1242 try:
1237 1243 delattr(self.dirstate, k)
1238 1244 except AttributeError:
1239 1245 pass
1240 1246 delattr(self.unfiltered(), 'dirstate')
1241 1247
1242 1248 def invalidate(self, clearfilecache=False):
1243 1249 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1244 1250 for k in self._filecache.keys():
1245 1251 # dirstate is invalidated separately in invalidatedirstate()
1246 1252 if k == 'dirstate':
1247 1253 continue
1248 1254
1249 1255 if clearfilecache:
1250 1256 del self._filecache[k]
1251 1257 try:
1252 1258 delattr(unfiltered, k)
1253 1259 except AttributeError:
1254 1260 pass
1255 1261 self.invalidatecaches()
1256 1262 self.store.invalidatecaches()
1257 1263
1258 1264 def invalidateall(self):
1259 1265 '''Fully invalidates both store and non-store parts, causing the
1260 1266 subsequent operation to reread any outside changes.'''
1261 1267 # extension should hook this to invalidate its caches
1262 1268 self.invalidate()
1263 1269 self.invalidatedirstate()
1264 1270
1265 1271 def _refreshfilecachestats(self, tr):
1266 1272 """Reload stats of cached files so that they are flagged as valid"""
1267 1273 for k, ce in self._filecache.items():
1268 1274 if k == 'dirstate' or k not in self.__dict__:
1269 1275 continue
1270 1276 ce.refresh()
1271 1277
1272 1278 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1273 1279 inheritchecker=None, parentenvvar=None):
1274 1280 parentlock = None
1275 1281 # the contents of parentenvvar are used by the underlying lock to
1276 1282 # determine whether it can be inherited
1277 1283 if parentenvvar is not None:
1278 1284 parentlock = os.environ.get(parentenvvar)
1279 1285 try:
1280 1286 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1281 1287 acquirefn=acquirefn, desc=desc,
1282 1288 inheritchecker=inheritchecker,
1283 1289 parentlock=parentlock)
1284 1290 except error.LockHeld as inst:
1285 1291 if not wait:
1286 1292 raise
1287 1293 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1288 1294 (desc, inst.locker))
1289 1295 # default to 600 seconds timeout
1290 1296 l = lockmod.lock(vfs, lockname,
1291 1297 int(self.ui.config("ui", "timeout", "600")),
1292 1298 releasefn=releasefn, acquirefn=acquirefn,
1293 1299 desc=desc)
1294 1300 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1295 1301 return l
1296 1302
1297 1303 def _afterlock(self, callback):
1298 1304 """add a callback to be run when the repository is fully unlocked
1299 1305
1300 1306 The callback will be executed when the outermost lock is released
1301 1307 (with wlock being higher level than 'lock')."""
1302 1308 for ref in (self._wlockref, self._lockref):
1303 1309 l = ref and ref()
1304 1310 if l and l.held:
1305 1311 l.postrelease.append(callback)
1306 1312 break
1307 1313 else: # no lock has been found.
1308 1314 callback()
1309 1315
1310 1316 def lock(self, wait=True):
1311 1317 '''Lock the repository store (.hg/store) and return a weak reference
1312 1318 to the lock. Use this before modifying the store (e.g. committing or
1313 1319 stripping). If you are opening a transaction, get a lock as well.
1314 1320
1315 1321 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1316 1322 'wlock' first to avoid a dead-lock hazard.'''
1317 1323 l = self._lockref and self._lockref()
1318 1324 if l is not None and l.held:
1319 1325 l.lock()
1320 1326 return l
1321 1327
1322 1328 l = self._lock(self.svfs, "lock", wait, None,
1323 1329 self.invalidate, _('repository %s') % self.origroot)
1324 1330 self._lockref = weakref.ref(l)
1325 1331 return l
1326 1332
1327 1333 def _wlockchecktransaction(self):
1328 1334 if self.currenttransaction() is not None:
1329 1335 raise error.LockInheritanceContractViolation(
1330 1336 'wlock cannot be inherited in the middle of a transaction')
1331 1337
1332 1338 def wlock(self, wait=True):
1333 1339 '''Lock the non-store parts of the repository (everything under
1334 1340 .hg except .hg/store) and return a weak reference to the lock.
1335 1341
1336 1342 Use this before modifying files in .hg.
1337 1343
1338 1344 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1339 1345 'wlock' first to avoid a dead-lock hazard.'''
1340 1346 l = self._wlockref and self._wlockref()
1341 1347 if l is not None and l.held:
1342 1348 l.lock()
1343 1349 return l
1344 1350
1345 1351 # We do not need to check for non-waiting lock acquisition. Such
1346 1352 # acquisition would not cause a dead-lock as it would just fail.
1347 1353 if wait and (self.ui.configbool('devel', 'all-warnings')
1348 1354 or self.ui.configbool('devel', 'check-locks')):
1349 1355 l = self._lockref and self._lockref()
1350 1356 if l is not None and l.held:
1351 1357 self.ui.develwarn('"wlock" acquired after "lock"')
1352 1358
1353 1359 def unlock():
1354 1360 if self.dirstate.pendingparentchange():
1355 1361 self.dirstate.invalidate()
1356 1362 else:
1357 1363 self.dirstate.write(None)
1358 1364
1359 1365 self._filecache['dirstate'].refresh()
1360 1366
1361 1367 l = self._lock(self.vfs, "wlock", wait, unlock,
1362 1368 self.invalidatedirstate, _('working directory of %s') %
1363 1369 self.origroot,
1364 1370 inheritchecker=self._wlockchecktransaction,
1365 1371 parentenvvar='HG_WLOCK_LOCKER')
1366 1372 self._wlockref = weakref.ref(l)
1367 1373 return l
1368 1374
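# Sketch of the acquisition order documented above, assuming ``repo`` is
# a loaded localrepository: take 'wlock' before 'lock' to avoid the
# dead-lock hazard:
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         pass  # mutate working copy and store
#     finally:
#         release(lock, wlock)  # module-level alias for lockmod.release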
1369 1375 def _currentlock(self, lockref):
1370 1376 """Returns the lock if it's held, or None if it's not."""
1371 1377 if lockref is None:
1372 1378 return None
1373 1379 l = lockref()
1374 1380 if l is None or not l.held:
1375 1381 return None
1376 1382 return l
1377 1383
1378 1384 def currentwlock(self):
1379 1385 """Returns the wlock if it's held, or None if it's not."""
1380 1386 return self._currentlock(self._wlockref)
1381 1387
1382 1388 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1383 1389 """
1384 1390 commit an individual file as part of a larger transaction
1385 1391 """
1386 1392
1387 1393 fname = fctx.path()
1388 1394 fparent1 = manifest1.get(fname, nullid)
1389 1395 fparent2 = manifest2.get(fname, nullid)
1390 1396 if isinstance(fctx, context.filectx):
1391 1397 node = fctx.filenode()
1392 1398 if node in [fparent1, fparent2]:
1393 1399 self.ui.debug('reusing %s filelog entry\n' % fname)
1394 1400 if manifest1.flags(fname) != fctx.flags():
1395 1401 changelist.append(fname)
1396 1402 return node
1397 1403
1398 1404 flog = self.file(fname)
1399 1405 meta = {}
1400 1406 copy = fctx.renamed()
1401 1407 if copy and copy[0] != fname:
1402 1408 # Mark the new revision of this file as a copy of another
1403 1409 # file. This copy data will effectively act as a parent
1404 1410 # of this new revision. If this is a merge, the first
1405 1411 # parent will be the nullid (meaning "look up the copy data")
1406 1412 # and the second one will be the other parent. For example:
1407 1413 #
1408 1414 # 0 --- 1 --- 3 rev1 changes file foo
1409 1415 # \ / rev2 renames foo to bar and changes it
1410 1416 # \- 2 -/ rev3 should have bar with all changes and
1411 1417 # should record that bar descends from
1412 1418 # bar in rev2 and foo in rev1
1413 1419 #
1414 1420 # this allows this merge to succeed:
1415 1421 #
1416 1422 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1417 1423 # \ / merging rev3 and rev4 should use bar@rev2
1418 1424 # \- 2 --- 4 as the merge base
1419 1425 #
1420 1426
1421 1427 cfname = copy[0]
1422 1428 crev = manifest1.get(cfname)
1423 1429 newfparent = fparent2
1424 1430
1425 1431 if manifest2: # branch merge
1426 1432 if fparent2 == nullid or crev is None: # copied on remote side
1427 1433 if cfname in manifest2:
1428 1434 crev = manifest2[cfname]
1429 1435 newfparent = fparent1
1430 1436
1431 1437 # Here, we used to search backwards through history to try to find
1432 1438 # where the file copy came from if the source of a copy was not in
1433 1439 # the parent directory. However, this doesn't actually make sense to
1434 1440 # do (what does a copy from something not in your working copy even
1435 1441 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1436 1442 # the user that copy information was dropped, so if they didn't
1437 1443 # expect this outcome it can be fixed, but this is the correct
1438 1444 # behavior in this circumstance.
1439 1445
1440 1446 if crev:
1441 1447 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1442 1448 meta["copy"] = cfname
1443 1449 meta["copyrev"] = hex(crev)
1444 1450 fparent1, fparent2 = nullid, newfparent
1445 1451 else:
1446 1452 self.ui.warn(_("warning: can't find ancestor for '%s' "
1447 1453 "copied from '%s'!\n") % (fname, cfname))
1448 1454
1449 1455 elif fparent1 == nullid:
1450 1456 fparent1, fparent2 = fparent2, nullid
1451 1457 elif fparent2 != nullid:
1452 1458 # is one parent an ancestor of the other?
1453 1459 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1454 1460 if fparent1 in fparentancestors:
1455 1461 fparent1, fparent2 = fparent2, nullid
1456 1462 elif fparent2 in fparentancestors:
1457 1463 fparent2 = nullid
1458 1464
1459 1465 # is the file changed?
1460 1466 text = fctx.data()
1461 1467 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1462 1468 changelist.append(fname)
1463 1469 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1464 1470 # are just the flags changed during merge?
1465 1471 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1466 1472 changelist.append(fname)
1467 1473
1468 1474 return fparent1
1469 1475
1470 1476 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1471 1477 """check for commit arguments that aren't commitable"""
1472 1478 if match.isexact() or match.prefix():
1473 1479 matched = set(status.modified + status.added + status.removed)
1474 1480
1475 1481 for f in match.files():
1476 1482 f = self.dirstate.normalize(f)
1477 1483 if f == '.' or f in matched or f in wctx.substate:
1478 1484 continue
1479 1485 if f in status.deleted:
1480 1486 fail(f, _('file not found!'))
1481 1487 if f in vdirs: # visited directory
1482 1488 d = f + '/'
1483 1489 for mf in matched:
1484 1490 if mf.startswith(d):
1485 1491 break
1486 1492 else:
1487 1493 fail(f, _("no match under directory!"))
1488 1494 elif f not in self.dirstate:
1489 1495 fail(f, _("file not tracked!"))
1490 1496
1491 1497 @unfilteredmethod
1492 1498 def commit(self, text="", user=None, date=None, match=None, force=False,
1493 1499 editor=False, extra=None):
1494 1500 """Add a new revision to current repository.
1495 1501
1496 1502 Revision information is gathered from the working directory,
1497 1503 match can be used to filter the committed files. If editor is
1498 1504 supplied, it is called to get a commit message.
1499 1505 """
1500 1506 if extra is None:
1501 1507 extra = {}
1502 1508
1503 1509 def fail(f, msg):
1504 1510 raise error.Abort('%s: %s' % (f, msg))
1505 1511
1506 1512 if not match:
1507 1513 match = matchmod.always(self.root, '')
1508 1514
1509 1515 if not force:
1510 1516 vdirs = []
1511 1517 match.explicitdir = vdirs.append
1512 1518 match.bad = fail
1513 1519
1514 1520 wlock = lock = tr = None
1515 1521 try:
1516 1522 wlock = self.wlock()
1517 1523 lock = self.lock() # for recent changelog (see issue4368)
1518 1524
1519 1525 wctx = self[None]
1520 1526 merge = len(wctx.parents()) > 1
1521 1527
1522 1528 if not force and merge and match.ispartial():
1523 1529 raise error.Abort(_('cannot partially commit a merge '
1524 1530 '(do not specify files or patterns)'))
1525 1531
1526 1532 status = self.status(match=match, clean=force)
1527 1533 if force:
1528 1534 status.modified.extend(status.clean) # mq may commit clean files
1529 1535
1530 1536 # check subrepos
1531 1537 subs = []
1532 1538 commitsubs = set()
1533 1539 newstate = wctx.substate.copy()
1534 1540 # only manage subrepos and .hgsubstate if .hgsub is present
1535 1541 if '.hgsub' in wctx:
1536 1542 # we'll decide whether to track this ourselves, thanks
1537 1543 for c in status.modified, status.added, status.removed:
1538 1544 if '.hgsubstate' in c:
1539 1545 c.remove('.hgsubstate')
1540 1546
1541 1547 # compare current state to last committed state
1542 1548 # build new substate based on last committed state
1543 1549 oldstate = wctx.p1().substate
1544 1550 for s in sorted(newstate.keys()):
1545 1551 if not match(s):
1546 1552 # ignore working copy, use old state if present
1547 1553 if s in oldstate:
1548 1554 newstate[s] = oldstate[s]
1549 1555 continue
1550 1556 if not force:
1551 1557 raise error.Abort(
1552 1558 _("commit with new subrepo %s excluded") % s)
1553 1559 dirtyreason = wctx.sub(s).dirtyreason(True)
1554 1560 if dirtyreason:
1555 1561 if not self.ui.configbool('ui', 'commitsubrepos'):
1556 1562 raise error.Abort(dirtyreason,
1557 1563 hint=_("use --subrepos for recursive commit"))
1558 1564 subs.append(s)
1559 1565 commitsubs.add(s)
1560 1566 else:
1561 1567 bs = wctx.sub(s).basestate()
1562 1568 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1563 1569 if oldstate.get(s, (None, None, None))[1] != bs:
1564 1570 subs.append(s)
1565 1571
1566 1572 # check for removed subrepos
1567 1573 for p in wctx.parents():
1568 1574 r = [s for s in p.substate if s not in newstate]
1569 1575 subs += [s for s in r if match(s)]
1570 1576 if subs:
1571 1577 if (not match('.hgsub') and
1572 1578 '.hgsub' in (wctx.modified() + wctx.added())):
1573 1579 raise error.Abort(
1574 1580 _("can't commit subrepos without .hgsub"))
1575 1581 status.modified.insert(0, '.hgsubstate')
1576 1582
1577 1583 elif '.hgsub' in status.removed:
1578 1584 # clean up .hgsubstate when .hgsub is removed
1579 1585 if ('.hgsubstate' in wctx and
1580 1586 '.hgsubstate' not in (status.modified + status.added +
1581 1587 status.removed)):
1582 1588 status.removed.insert(0, '.hgsubstate')
1583 1589
1584 1590 # make sure all explicit patterns are matched
1585 1591 if not force:
1586 1592 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1587 1593
1588 1594 cctx = context.workingcommitctx(self, status,
1589 1595 text, user, date, extra)
1590 1596
1591 1597 # internal config: ui.allowemptycommit
1592 1598 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1593 1599 or extra.get('close') or merge or cctx.files()
1594 1600 or self.ui.configbool('ui', 'allowemptycommit'))
1595 1601 if not allowemptycommit:
1596 1602 return None
1597 1603
1598 1604 if merge and cctx.deleted():
1599 1605 raise error.Abort(_("cannot commit merge with missing files"))
1600 1606
1601 1607 ms = mergemod.mergestate.read(self)
1602 1608
1603 1609 if list(ms.unresolved()):
1604 1610 raise error.Abort(_('unresolved merge conflicts '
1605 1611 '(see "hg help resolve")'))
1606 1612 if ms.mdstate() != 's' or list(ms.driverresolved()):
1607 1613 raise error.Abort(_('driver-resolved merge conflicts'),
1608 1614 hint=_('run "hg resolve --all" to resolve'))
1609 1615
1610 1616 if editor:
1611 1617 cctx._text = editor(self, cctx, subs)
1612 1618 edited = (text != cctx._text)
1613 1619
1614 1620 # Save commit message in case this transaction gets rolled back
1615 1621 # (e.g. by a pretxncommit hook). Leave the content alone on
1616 1622 # the assumption that the user will use the same editor again.
1617 1623 msgfn = self.savecommitmessage(cctx._text)
1618 1624
1619 1625 # commit subs and write new state
1620 1626 if subs:
1621 1627 for s in sorted(commitsubs):
1622 1628 sub = wctx.sub(s)
1623 1629 self.ui.status(_('committing subrepository %s\n') %
1624 1630 subrepo.subrelpath(sub))
1625 1631 sr = sub.commit(cctx._text, user, date)
1626 1632 newstate[s] = (newstate[s][0], sr)
1627 1633 subrepo.writestate(self, newstate)
1628 1634
1629 1635 p1, p2 = self.dirstate.parents()
1630 1636 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1631 1637 try:
1632 1638 self.hook("precommit", throw=True, parent1=hookp1,
1633 1639 parent2=hookp2)
1634 1640 tr = self.transaction('commit')
1635 1641 ret = self.commitctx(cctx, True)
1636 1642 except: # re-raises
1637 1643 if edited:
1638 1644 self.ui.write(
1639 1645 _('note: commit message saved in %s\n') % msgfn)
1640 1646 raise
1641 1647 # update bookmarks, dirstate and mergestate
1642 1648 bookmarks.update(self, [p1, p2], ret)
1643 1649 cctx.markcommitted(ret)
1644 1650 ms.reset()
1645 1651 tr.close()
1646 1652
1647 1653 finally:
1648 1654 lockmod.release(tr, lock, wlock)
1649 1655
1650 1656 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1651 1657 # hack for commands that use a temporary commit (e.g. histedit):
1652 1658 # the temporary commit may already be stripped before the hook runs
1653 1659 if self.changelog.hasnode(ret):
1654 1660 self.hook("commit", node=node, parent1=parent1,
1655 1661 parent2=parent2)
1656 1662 self._afterlock(commithook)
1657 1663 return ret
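
A usage sketch of this API (the commit text and user are illustrative; assumes a loaded `repo` with pending working-directory changes):

    node = repo.commit(text='fix parser bug',
                       user='alice <alice@example.com>')
    if node is None:
        print('nothing changed (empty commit not allowed)')
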
1658 1664
1659 1665 @unfilteredmethod
1660 1666 def commitctx(self, ctx, error=False):
1661 1667 """Add a new revision to current repository.
1662 1668 Revision information is passed via the context argument.
1663 1669 """
1664 1670
1665 1671 tr = None
1666 1672 p1, p2 = ctx.p1(), ctx.p2()
1667 1673 user = ctx.user()
1668 1674
1669 1675 lock = self.lock()
1670 1676 try:
1671 1677 tr = self.transaction("commit")
1672 1678 trp = weakref.proxy(tr)
1673 1679
1674 1680 if ctx.files():
1675 1681 m1 = p1.manifest()
1676 1682 m2 = p2.manifest()
1677 1683 m = m1.copy()
1678 1684
1679 1685 # check in files
1680 1686 added = []
1681 1687 changed = []
1682 1688 removed = list(ctx.removed())
1683 1689 linkrev = len(self)
1684 1690 self.ui.note(_("committing files:\n"))
1685 1691 for f in sorted(ctx.modified() + ctx.added()):
1686 1692 self.ui.note(f + "\n")
1687 1693 try:
1688 1694 fctx = ctx[f]
1689 1695 if fctx is None:
1690 1696 removed.append(f)
1691 1697 else:
1692 1698 added.append(f)
1693 1699 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1694 1700 trp, changed)
1695 1701 m.setflag(f, fctx.flags())
1696 1702 except OSError as inst:
1697 1703 self.ui.warn(_("trouble committing %s!\n") % f)
1698 1704 raise
1699 1705 except IOError as inst:
1700 1706 errcode = getattr(inst, 'errno', errno.ENOENT)
1701 1707 if error or errcode and errcode != errno.ENOENT:
1702 1708 self.ui.warn(_("trouble committing %s!\n") % f)
1703 1709 raise
1704 1710
1705 1711 # update manifest
1706 1712 self.ui.note(_("committing manifest\n"))
1707 1713 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1708 1714 drop = [f for f in removed if f in m]
1709 1715 for f in drop:
1710 1716 del m[f]
1711 1717 mn = self.manifest.add(m, trp, linkrev,
1712 1718 p1.manifestnode(), p2.manifestnode(),
1713 1719 added, drop)
1714 1720 files = changed + removed
1715 1721 else:
1716 1722 mn = p1.manifestnode()
1717 1723 files = []
1718 1724
1719 1725 # update changelog
1720 1726 self.ui.note(_("committing changelog\n"))
1721 1727 self.changelog.delayupdate(tr)
1722 1728 n = self.changelog.add(mn, files, ctx.description(),
1723 1729 trp, p1.node(), p2.node(),
1724 1730 user, ctx.date(), ctx.extra().copy())
1725 1731 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1726 1732 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1727 1733 parent2=xp2)
1728 1734 # set the new commit to its proper phase
1729 1735 targetphase = subrepo.newcommitphase(self.ui, ctx)
1730 1736 if targetphase:
1731 1737 # retracting the boundary does not alter parent changesets;
1732 1738 # if a parent has a higher phase, the resulting phase will
1733 1739 # be compliant anyway.
1734 1740 #
1735 1741 # if the minimal phase was 0, we don't need to retract anything
1736 1742 phases.retractboundary(self, tr, targetphase, [n])
1737 1743 tr.close()
1738 1744 branchmap.updatecache(self.filtered('served'))
1739 1745 return n
1740 1746 finally:
1741 1747 if tr:
1742 1748 tr.release()
1743 1749 lock.release()
1744 1750
1745 1751 @unfilteredmethod
1746 1752 def destroying(self):
1747 1753 '''Inform the repository that nodes are about to be destroyed.
1748 1754 Intended for use by strip and rollback, so there's a common
1749 1755 place for anything that has to be done before destroying history.
1750 1756
1751 1757 This is mostly useful for saving state that is in memory and waiting
1752 1758 to be flushed when the current lock is released. Because a call to
1753 1759 destroyed is imminent, the repo will be invalidated, causing those
1754 1760 changes either to stay in memory (waiting for the next unlock) or to
1755 1761 vanish completely.
1756 1762 '''
1757 1763 # When using the same lock to commit and strip, the phasecache is left
1758 1764 # dirty after committing. Then when we strip, the repo is invalidated,
1759 1765 # causing those changes to disappear.
1760 1766 if '_phasecache' in vars(self):
1761 1767 self._phasecache.write()
1762 1768
1763 1769 @unfilteredmethod
1764 1770 def destroyed(self):
1765 1771 '''Inform the repository that nodes have been destroyed.
1766 1772 Intended for use by strip and rollback, so there's a common
1767 1773 place for anything that has to be done after destroying history.
1768 1774 '''
1769 1775 # When one tries to:
1770 1776 # 1) destroy nodes thus calling this method (e.g. strip)
1771 1777 # 2) use phasecache somewhere (e.g. commit)
1772 1778 #
1773 1779 # then 2) will fail because the phasecache contains nodes that were
1774 1780 # removed. We can either remove phasecache from the filecache,
1775 1781 # causing it to reload next time it is accessed, or simply filter
1776 1782 # the removed nodes now and write the updated cache.
1777 1783 self._phasecache.filterunknown(self)
1778 1784 self._phasecache.write()
1779 1785
1780 1786 # update the 'served' branch cache to help read-only server processes
1781 1787 # Thanks to branchcache collaboration this is done from the nearest
1782 1788 # filtered subset and it is expected to be fast.
1783 1789 branchmap.updatecache(self.filtered('served'))
1784 1790
1785 1791 # Ensure the persistent tag cache is updated. Doing it now
1786 1792 # means that the tag cache only has to worry about destroyed
1787 1793 # heads immediately after a strip/rollback. That in turn
1788 1794 # guarantees that "cachetip == currenttip" (comparing both rev
1789 1795 # and node) always means no nodes have been added or destroyed.
1790 1796
1791 1797 # XXX this is suboptimal when qrefresh'ing: we strip the current
1792 1798 # head, refresh the tag cache, then immediately add a new head.
1793 1799 # But I think doing it this way is necessary for the "instant
1794 1800 # tag cache retrieval" case to work.
1795 1801 self.invalidate()
1796 1802
1797 1803 def walk(self, match, node=None):
1798 1804 '''
1799 1805 walk recursively through the directory tree or a given
1800 1806 changeset, finding all files matched by the match
1801 1807 function
1802 1808 '''
1803 1809 return self[node].walk(match)
1804 1810
1805 1811 def status(self, node1='.', node2=None, match=None,
1806 1812 ignored=False, clean=False, unknown=False,
1807 1813 listsubrepos=False):
1808 1814 '''a convenience method that calls node1.status(node2)'''
1809 1815 return self[node1].status(node2, match, ignored, clean, unknown,
1810 1816 listsubrepos)
1811 1817
1812 1818 def heads(self, start=None):
1813 1819 heads = self.changelog.heads(start)
1814 1820 # sort the output in rev descending order
1815 1821 return sorted(heads, key=self.changelog.rev, reverse=True)
1816 1822
1817 1823 def branchheads(self, branch=None, start=None, closed=False):
1818 1824 '''return a (possibly filtered) list of heads for the given branch
1819 1825
1820 1826 Heads are returned in topological order, from newest to oldest.
1821 1827 If branch is None, use the dirstate branch.
1822 1828 If start is not None, return only heads reachable from start.
1823 1829 If closed is True, return heads that are marked as closed as well.
1824 1830 '''
1825 1831 if branch is None:
1826 1832 branch = self[None].branch()
1827 1833 branches = self.branchmap()
1828 1834 if branch not in branches:
1829 1835 return []
1830 1836 # the cache returns heads ordered lowest to highest
1831 1837 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1832 1838 if start is not None:
1833 1839 # filter out the heads that cannot be reached from startrev
1834 1840 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1835 1841 bheads = [h for h in bheads if h in fbheads]
1836 1842 return bheads
1837 1843
1838 1844 def branches(self, nodes):
1839 1845 if not nodes:
1840 1846 nodes = [self.changelog.tip()]
1841 1847 b = []
1842 1848 for n in nodes:
1843 1849 t = n
1844 1850 while True:
1845 1851 p = self.changelog.parents(n)
1846 1852 if p[1] != nullid or p[0] == nullid:
1847 1853 b.append((t, n, p[0], p[1]))
1848 1854 break
1849 1855 n = p[0]
1850 1856 return b
1851 1857
1852 1858 def between(self, pairs):
1853 1859 r = []
1854 1860
1855 1861 for top, bottom in pairs:
1856 1862 n, l, i = top, [], 0
1857 1863 f = 1
1858 1864
1859 1865 while n != bottom and n != nullid:
1860 1866 p = self.changelog.parents(n)[0]
1861 1867 if i == f:
1862 1868 l.append(n)
1863 1869 f = f * 2
1864 1870 n = p
1865 1871 i += 1
1866 1872
1867 1873 r.append(l)
1868 1874
1869 1875 return r
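
The doubling of `f` above means `between` samples nodes at exponentially growing distances along the first-parent chain (positions 1, 2, 4, 8, ...), keeping the returned list logarithmic in the chain length. A small standalone sketch of which positions the loop collects:

    sampled, f, i = [], 1, 0
    while i < 20:          # pretend the chain from top to bottom has 20 nodes
        if i == f:
            sampled.append(i)
            f *= 2
        i += 1
    print(sampled)         # [1, 2, 4, 8, 16]
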
1870 1876
1871 1877 def checkpush(self, pushop):
1872 1878 """Extensions can override this function if additional checks have
1873 1879 to be performed before pushing, or call it if they override the push
1874 1880 command.
1875 1881 """
1876 1882 pass
1877 1883
1878 1884 @unfilteredpropertycache
1879 1885 def prepushoutgoinghooks(self):
1880 1886 """Return util.hooks consists of a pushop with repo, remote, outgoing
1881 1887 methods, which are called before pushing changesets.
1882 1888 """
1883 1889 return util.hooks()
1884 1890
1885 1891 def pushkey(self, namespace, key, old, new):
1886 1892 try:
1887 1893 tr = self.currenttransaction()
1888 1894 hookargs = {}
1889 1895 if tr is not None:
1890 1896 hookargs.update(tr.hookargs)
1891 1897 hookargs['namespace'] = namespace
1892 1898 hookargs['key'] = key
1893 1899 hookargs['old'] = old
1894 1900 hookargs['new'] = new
1895 1901 self.hook('prepushkey', throw=True, **hookargs)
1896 1902 except error.HookAbort as exc:
1897 1903 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1898 1904 if exc.hint:
1899 1905 self.ui.write_err(_("(%s)\n") % exc.hint)
1900 1906 return False
1901 1907 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1902 1908 ret = pushkey.push(self, namespace, key, old, new)
1903 1909 def runhook():
1904 1910 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1905 1911 ret=ret)
1906 1912 self._afterlock(runhook)
1907 1913 return ret
1908 1914
1909 1915 def listkeys(self, namespace):
1910 1916 self.hook('prelistkeys', throw=True, namespace=namespace)
1911 1917 self.ui.debug('listing keys for "%s"\n' % namespace)
1912 1918 values = pushkey.list(self, namespace)
1913 1919 self.hook('listkeys', namespace=namespace, values=values)
1914 1920 return values
1915 1921
1916 1922 def debugwireargs(self, one, two, three=None, four=None, five=None):
1917 1923 '''used to test argument passing over the wire'''
1918 1924 return "%s %s %s %s %s" % (one, two, three, four, five)
1919 1925
1920 1926 def savecommitmessage(self, text):
1921 1927 fp = self.vfs('last-message.txt', 'wb')
1922 1928 try:
1923 1929 fp.write(text)
1924 1930 finally:
1925 1931 fp.close()
1926 1932 return self.pathto(fp.name[len(self.root) + 1:])
1927 1933
1928 1934 # used to avoid circular references so destructors work
1929 1935 def aftertrans(files):
1930 1936 renamefiles = [tuple(t) for t in files]
1931 1937 def a():
1932 1938 for vfs, src, dest in renamefiles:
1933 1939 try:
1934 1940 vfs.rename(src, dest)
1935 1941 except OSError: # journal file does not yet exist
1936 1942 pass
1937 1943 return a
1938 1944
1939 1945 def undoname(fn):
1940 1946 base, name = os.path.split(fn)
1941 1947 assert name.startswith('journal')
1942 1948 return os.path.join(base, name.replace('journal', 'undo', 1))
1943 1949
1944 1950 def instance(ui, path, create):
1945 1951 return localrepository(ui, util.urllocalpath(path), create)
1946 1952
1947 1953 def islocal(path):
1948 1954 return True
1949 1955
1950 1956 def newreporequirements(repo):
1951 1957 """Determine the set of requirements for a new local repository.
1952 1958
1953 1959 Extensions can wrap this function to specify custom requirements for
1954 1960 new repositories.
1955 1961 """
1956 1962 ui = repo.ui
1957 1963 requirements = set(['revlogv1'])
1958 1964 if ui.configbool('format', 'usestore', True):
1959 1965 requirements.add('store')
1960 1966 if ui.configbool('format', 'usefncache', True):
1961 1967 requirements.add('fncache')
1962 1968 if ui.configbool('format', 'dotencode', True):
1963 1969 requirements.add('dotencode')
1964 1970
1965 1971 if scmutil.gdinitconfig(ui):
1966 1972 requirements.add('generaldelta')
1967 1973 if ui.configbool('experimental', 'treemanifest', False):
1968 1974 requirements.add('treemanifest')
1969 1975 if ui.configbool('experimental', 'manifestv2', False):
1970 1976 requirements.add('manifestv2')
1971 1977
1972 1978 return requirements
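
Since extensions can wrap this function, a hypothetical extension adding its own requirement could look like the following sketch (`myfeature` is an invented name; `extensions.wrapfunction` is the usual wrapping helper):

    from mercurial import extensions, localrepo

    def _wrapreqs(orig, repo):
        requirements = orig(repo)
        if repo.ui.configbool('experimental', 'myfeature', False):
            requirements.add('myfeature')   # hypothetical new requirement
        return requirements

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)
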
@@ -1,1404 +1,1423 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
43 43 class status(tuple):
44 44 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
45 45 and 'ignored' properties are only relevant to the working copy.
46 46 '''
47 47
48 48 __slots__ = ()
49 49
50 50 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
51 51 clean):
52 52 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
53 53 ignored, clean))
54 54
55 55 @property
56 56 def modified(self):
57 57 '''files that have been modified'''
58 58 return self[0]
59 59
60 60 @property
61 61 def added(self):
62 62 '''files that have been added'''
63 63 return self[1]
64 64
65 65 @property
66 66 def removed(self):
67 67 '''files that have been removed'''
68 68 return self[2]
69 69
70 70 @property
71 71 def deleted(self):
72 72 '''files that are in the dirstate, but have been deleted from the
73 73 working copy (aka "missing")
74 74 '''
75 75 return self[3]
76 76
77 77 @property
78 78 def unknown(self):
79 79 '''files not in the dirstate that are not ignored'''
80 80 return self[4]
81 81
82 82 @property
83 83 def ignored(self):
84 84 '''files not in the dirstate that are ignored (by _dirignore())'''
85 85 return self[5]
86 86
87 87 @property
88 88 def clean(self):
89 89 '''files that have not been modified'''
90 90 return self[6]
91 91
92 92 def __repr__(self, *args, **kwargs):
93 93 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
94 94 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
96 96 def itersubrepos(ctx1, ctx2):
97 97 """find subrepos in ctx1 or ctx2"""
98 98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103 103
104 104 missing = set()
105 105
106 106 for subpath in ctx2.substate:
107 107 if subpath not in ctx1.substate:
108 108 del subpaths[subpath]
109 109 missing.add(subpath)
110 110
111 111 for subpath, ctx in sorted(subpaths.iteritems()):
112 112 yield subpath, ctx.sub(subpath)
113 113
114 114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 115 # status and diff will have an accurate result when it does
116 116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 117 # against itself.
118 118 for subpath in missing:
119 119 yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
121 121 def nochangesfound(ui, repo, excluded=None):
122 122 '''Report no changes for push/pull, excluded is None or a list of
123 123 nodes excluded from the push/pull.
124 124 '''
125 125 secretlist = []
126 126 if excluded:
127 127 for n in excluded:
128 128 if n not in repo:
129 129 # discovery should not have included the filtered revision,
130 130 # we have to explicitly exclude it until discovery is cleaned up.
131 131 continue
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def checknewlabel(repo, lbl, kind):
143 143 # Do not use the "kind" parameter in ui output.
144 144 # It makes strings difficult to translate.
145 145 if lbl in ['tip', '.', 'null']:
146 146 raise error.Abort(_("the name '%s' is reserved") % lbl)
147 147 for c in (':', '\0', '\n', '\r'):
148 148 if c in lbl:
149 149 raise error.Abort(_("%r cannot be used in a name") % c)
150 150 try:
151 151 int(lbl)
152 152 raise error.Abort(_("cannot use an integer as a name"))
153 153 except ValueError:
154 154 pass
155 155
156 156 def checkfilename(f):
157 157 '''Check that the filename f is an acceptable filename for a tracked file'''
158 158 if '\r' in f or '\n' in f:
159 159 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
161 161 def checkportable(ui, f):
162 162 '''Check if filename f is portable and warn or abort depending on config'''
163 163 checkfilename(f)
164 164 abort, warn = checkportabilityalert(ui)
165 165 if abort or warn:
166 166 msg = util.checkwinfilename(f)
167 167 if msg:
168 168 msg = "%s: %r" % (msg, f)
169 169 if abort:
170 170 raise error.Abort(msg)
171 171 ui.warn(_("warning: %s\n") % msg)
172 172
173 173 def checkportabilityalert(ui):
174 174 '''check if the user's config requests nothing, a warning, or abort for
175 175 non-portable filenames'''
176 176 val = ui.config('ui', 'portablefilenames', 'warn')
177 177 lval = val.lower()
178 178 bval = util.parsebool(val)
179 179 abort = os.name == 'nt' or lval == 'abort'
180 180 warn = bval or lval == 'warn'
181 181 if bval is None and not (warn or abort or lval == 'ignore'):
182 182 raise error.ConfigError(
183 183 _("ui.portablefilenames value is invalid ('%s')") % val)
184 184 return abort, warn
185 185
186 186 class casecollisionauditor(object):
187 187 def __init__(self, ui, abort, dirstate):
188 188 self._ui = ui
189 189 self._abort = abort
190 190 allfiles = '\0'.join(dirstate._map)
191 191 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
192 192 self._dirstate = dirstate
193 193 # The purpose of _newfiles is so that we don't complain about
194 194 # case collisions if someone were to call this object with the
195 195 # same filename twice.
196 196 self._newfiles = set()
197 197
198 198 def __call__(self, f):
199 199 if f in self._newfiles:
200 200 return
201 201 fl = encoding.lower(f)
202 202 if fl in self._loweredfiles and f not in self._dirstate:
203 203 msg = _('possible case-folding collision for %s') % f
204 204 if self._abort:
205 205 raise error.Abort(msg)
206 206 self._ui.warn(_("warning: %s\n") % msg)
207 207 self._loweredfiles.add(fl)
208 208 self._newfiles.add(f)
209 209
210 210 def filteredhash(repo, maxrev):
211 211 """build hash of filtered revisions in the current repoview.
212 212
213 213 Multiple caches perform up-to-date validation by checking that the
214 214 tiprev and tipnode stored in the cache file match the current repository.
215 215 However, this is not sufficient for validating repoviews because the set
216 216 of revisions in the view may change without the repository tiprev and
217 217 tipnode changing.
218 218
219 219 This function hashes all the revs filtered from the view and returns
220 220 that SHA-1 digest.
221 221 """
222 222 cl = repo.changelog
223 223 if not cl.filteredrevs:
224 224 return None
225 225 key = None
226 226 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
227 227 if revs:
228 228 s = hashlib.sha1()
229 229 for rev in revs:
230 230 s.update('%s;' % rev)
231 231 key = s.digest()
232 232 return key
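
A sketch of the validation pattern described above; the `cached*` names are illustrative stand-ins for whatever a cache file stores:

    maxrev = len(repo.changelog) - 1
    key = filteredhash(repo, maxrev)       # None when no revision is filtered
    cachevalid = (cachedtiprev == maxrev
                  and cachedtipnode == repo.changelog.tip()
                  and cachedhash == key)
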
233 233
234 234 class abstractvfs(object):
235 235 """Abstract base class; cannot be instantiated"""
236 236
237 237 def __init__(self, *args, **kwargs):
238 238 '''Prevent instantiation; don't call this from subclasses.'''
239 239 raise NotImplementedError('attempted instantiating ' + str(type(self)))
240 240
241 241 def tryread(self, path):
242 242 '''gracefully return an empty string for missing files'''
243 243 try:
244 244 return self.read(path)
245 245 except IOError as inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248 return ""
249 249
250 250 def tryreadlines(self, path, mode='rb'):
251 251 '''gracefully return an empty array for missing files'''
252 252 try:
253 253 return self.readlines(path, mode=mode)
254 254 except IOError as inst:
255 255 if inst.errno != errno.ENOENT:
256 256 raise
257 257 return []
258 258
259 259 def open(self, path, mode="r", text=False, atomictemp=False,
260 260 notindexed=False, backgroundclose=False):
261 261 '''Open ``path`` file, which is relative to vfs root.
262 262
263 263 Newly created directories are marked as "not to be indexed by
264 264 the content indexing service", if ``notindexed`` is specified
265 265 for "write" mode access.
266 266 '''
267 267 self.open = self.__call__
268 268 return self.__call__(path, mode, text, atomictemp, notindexed,
269 269 backgroundclose=backgroundclose)
270 270
271 271 def read(self, path):
272 272 with self(path, 'rb') as fp:
273 273 return fp.read()
274 274
275 275 def readlines(self, path, mode='rb'):
276 276 with self(path, mode=mode) as fp:
277 277 return fp.readlines()
278 278
279 279 def write(self, path, data, backgroundclose=False):
280 280 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
281 281 return fp.write(data)
282 282
283 283 def writelines(self, path, data, mode='wb', notindexed=False):
284 284 with self(path, mode=mode, notindexed=notindexed) as fp:
285 285 return fp.writelines(data)
286 286
287 287 def append(self, path, data):
288 288 with self(path, 'ab') as fp:
289 289 return fp.write(data)
290 290
291 291 def basename(self, path):
292 292 """return base element of a path (as os.path.basename would do)
293 293
294 294 This exists to allow handling of strange encoding if needed."""
295 295 return os.path.basename(path)
296 296
297 297 def chmod(self, path, mode):
298 298 return os.chmod(self.join(path), mode)
299 299
300 300 def dirname(self, path):
301 301 """return dirname element of a path (as os.path.dirname would do)
302 302
303 303 This exists to allow handling of strange encoding if needed."""
304 304 return os.path.dirname(path)
305 305
306 306 def exists(self, path=None):
307 307 return os.path.exists(self.join(path))
308 308
309 309 def fstat(self, fp):
310 310 return util.fstat(fp)
311 311
312 312 def isdir(self, path=None):
313 313 return os.path.isdir(self.join(path))
314 314
315 315 def isfile(self, path=None):
316 316 return os.path.isfile(self.join(path))
317 317
318 318 def islink(self, path=None):
319 319 return os.path.islink(self.join(path))
320 320
321 321 def isfileorlink(self, path=None):
322 322 '''return whether path is a regular file or a symlink
323 323
324 324 Unlike isfile, this doesn't follow symlinks.'''
325 325 try:
326 326 st = self.lstat(path)
327 327 except OSError:
328 328 return False
329 329 mode = st.st_mode
330 330 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
331 331
332 332 def reljoin(self, *paths):
333 333 """join various elements of a path together (as os.path.join would do)
334 334
335 335 The vfs base is not injected so that paths stay relative. This exists
336 336 to allow handling of strange encoding if needed."""
337 337 return os.path.join(*paths)
338 338
339 339 def split(self, path):
340 340 """split top-most element of a path (as os.path.split would do)
341 341
342 342 This exists to allow handling of strange encoding if needed."""
343 343 return os.path.split(path)
344 344
345 345 def lexists(self, path=None):
346 346 return os.path.lexists(self.join(path))
347 347
348 348 def lstat(self, path=None):
349 349 return os.lstat(self.join(path))
350 350
351 351 def listdir(self, path=None):
352 352 return os.listdir(self.join(path))
353 353
354 354 def makedir(self, path=None, notindexed=True):
355 355 return util.makedir(self.join(path), notindexed)
356 356
357 357 def makedirs(self, path=None, mode=None):
358 358 return util.makedirs(self.join(path), mode)
359 359
360 360 def makelock(self, info, path):
361 361 return util.makelock(info, self.join(path))
362 362
363 363 def mkdir(self, path=None):
364 364 return os.mkdir(self.join(path))
365 365
366 366 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
367 367 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
368 368 dir=self.join(dir), text=text)
369 369 dname, fname = util.split(name)
370 370 if dir:
371 371 return fd, os.path.join(dir, fname)
372 372 else:
373 373 return fd, fname
374 374
375 375 def readdir(self, path=None, stat=None, skip=None):
376 376 return osutil.listdir(self.join(path), stat, skip)
377 377
378 378 def readlock(self, path):
379 379 return util.readlock(self.join(path))
380 380
381 381 def rename(self, src, dst, checkambig=False):
382 382 """Rename from src to dst
383 383
384 384 The checkambig argument is used with util.filestat, and is useful
385 385 only if the destination file is guarded by a lock
386 386 (e.g. repo.lock or repo.wlock).
387 387 """
388 388 dstpath = self.join(dst)
389 389 oldstat = checkambig and util.filestat(dstpath)
390 390 if oldstat and oldstat.stat:
391 391 ret = util.rename(self.join(src), dstpath)
392 392 newstat = util.filestat(dstpath)
393 393 if newstat.isambig(oldstat):
394 394 # the stat of the renamed file is ambiguous with the original one
395 395 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
396 396 os.utime(dstpath, (advanced, advanced))
397 397 return ret
398 398 return util.rename(self.join(src), dstpath)
399 399
400 400 def readlink(self, path):
401 401 return os.readlink(self.join(path))
402 402
403 403 def removedirs(self, path=None):
404 404 """Remove a leaf directory and all empty intermediate ones
405 405 """
406 406 return util.removedirs(self.join(path))
407 407
408 408 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
409 409 """Remove a directory tree recursively
410 410
411 411 If ``forcibly``, this tries to remove READ-ONLY files, too.
412 412 """
413 413 if forcibly:
414 414 def onerror(function, path, excinfo):
415 415 if function is not os.remove:
416 416 raise
417 417 # read-only files cannot be unlinked under Windows
418 418 s = os.stat(path)
419 419 if (s.st_mode & stat.S_IWRITE) != 0:
420 420 raise
421 421 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
422 422 os.remove(path)
423 423 else:
424 424 onerror = None
425 425 return shutil.rmtree(self.join(path),
426 426 ignore_errors=ignore_errors, onerror=onerror)
427 427
428 428 def setflags(self, path, l, x):
429 429 return util.setflags(self.join(path), l, x)
430 430
431 431 def stat(self, path=None):
432 432 return os.stat(self.join(path))
433 433
434 434 def unlink(self, path=None):
435 435 return util.unlink(self.join(path))
436 436
437 437 def unlinkpath(self, path=None, ignoremissing=False):
438 438 return util.unlinkpath(self.join(path), ignoremissing)
439 439
440 440 def utime(self, path=None, t=None):
441 441 return os.utime(self.join(path), t)
442 442
443 443 def walk(self, path=None, onerror=None):
444 444 """Yield (dirpath, dirs, files) tuple for each directories under path
445 445
446 446 ``dirpath`` is relative one from the root of this vfs. This
447 447 uses ``os.sep`` as path separator, even you specify POSIX
448 448 style ``path``.
449 449
450 450 "The root of this vfs" is represented as empty ``dirpath``.
451 451 """
452 452 root = os.path.normpath(self.join(None))
453 453 # when dirpath == root, dirpath[prefixlen:] becomes empty
454 454 # because len(dirpath) < prefixlen.
455 455 prefixlen = len(pathutil.normasprefix(root))
456 456 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
457 457 yield (dirpath[prefixlen:], dirs, files)
458 458
459 459 @contextlib.contextmanager
460 460 def backgroundclosing(self, ui, expectedcount=-1):
461 461 """Allow files to be closed asynchronously.
462 462
463 463 When this context manager is active, ``backgroundclose`` can be passed
464 464 to ``__call__``/``open`` to result in the file possibly being closed
465 465 asynchronously, on a background thread.
466 466 """
467 467 # This is an arbitrary restriction and could be changed if we ever
468 468 # have a use case.
469 469 vfs = getattr(self, 'vfs', self)
470 470 if getattr(vfs, '_backgroundfilecloser', None):
471 471 raise error.Abort(
472 472 _('can only have 1 active background file closer'))
473 473
474 474 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
475 475 try:
476 476 vfs._backgroundfilecloser = bfc
477 477 yield bfc
478 478 finally:
479 479 vfs._backgroundfilecloser = None
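
A usage sketch combining this context manager with the `backgroundclose` flag accepted by `write()` above (`chunks` is an invented iterable); each file is opened exactly once, per the criteria documented on `__call__` below:

    with vfs.backgroundclosing(repo.ui, expectedcount=len(chunks)):
        for i, data in enumerate(chunks):
            vfs.write('chunk-%d' % i, data, backgroundclose=True)
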
480 480
481 481 class vfs(abstractvfs):
482 482 '''Operate files relative to a base directory
483 483
484 484 This class is used to hide the details of COW semantics and
485 485 remote file access from higher level code.
486 486 '''
487 487 def __init__(self, base, audit=True, expandpath=False, realpath=False):
488 488 if expandpath:
489 489 base = util.expandpath(base)
490 490 if realpath:
491 491 base = os.path.realpath(base)
492 492 self.base = base
493 493 self.mustaudit = audit
494 494 self.createmode = None
495 495 self._trustnlink = None
496 496
497 497 @property
498 498 def mustaudit(self):
499 499 return self._audit
500 500
501 501 @mustaudit.setter
502 502 def mustaudit(self, onoff):
503 503 self._audit = onoff
504 504 if onoff:
505 505 self.audit = pathutil.pathauditor(self.base)
506 506 else:
507 507 self.audit = util.always
508 508
509 509 @util.propertycache
510 510 def _cansymlink(self):
511 511 return util.checklink(self.base)
512 512
513 513 @util.propertycache
514 514 def _chmod(self):
515 515 return util.checkexec(self.base)
516 516
517 517 def _fixfilemode(self, name):
518 518 if self.createmode is None or not self._chmod:
519 519 return
520 520 os.chmod(name, self.createmode & 0o666)
521 521
522 522 def __call__(self, path, mode="r", text=False, atomictemp=False,
523 523 notindexed=False, backgroundclose=False, checkambig=False):
524 524 '''Open ``path`` file, which is relative to vfs root.
525 525
526 526 Newly created directories are marked as "not to be indexed by
527 527 the content indexing service", if ``notindexed`` is specified
528 528 for "write" mode access.
529 529
530 530 If ``backgroundclose`` is passed, the file may be closed asynchronously.
531 531 It can only be used if the ``self.backgroundclosing()`` context manager
532 532 is active. This should only be specified if the following criteria hold:
533 533
534 534 1. There is a potential for writing thousands of files. Unless you
535 535 are writing thousands of files, the performance benefits of
536 536 asynchronously closing files are not realized.
537 537 2. Files are opened exactly once for the ``backgroundclosing``
538 538 active duration and are therefore free of race conditions between
539 539 closing a file on a background thread and reopening it. (If the
540 540 file were opened multiple times, there could be unflushed data
541 541 because the original file handle hasn't been flushed/closed yet.)
542 542
543 543 The ``checkambig`` argument is passed to atomictempfile (valid
544 544 only for writing), and is useful only if the target file is
545 545 guarded by a lock (e.g. repo.lock or repo.wlock).
546 546 '''
547 547 if self._audit:
548 548 r = util.checkosfilename(path)
549 549 if r:
550 550 raise error.Abort("%s: %r" % (r, path))
551 551 self.audit(path)
552 552 f = self.join(path)
553 553
554 554 if not text and "b" not in mode:
555 555 mode += "b" # for that other OS
556 556
557 557 nlink = -1
558 558 if mode not in ('r', 'rb'):
559 559 dirname, basename = util.split(f)
560 560 # If basename is empty, then the path is malformed because it points
561 561 # to a directory. Let the posixfile() call below raise IOError.
562 562 if basename:
563 563 if atomictemp:
564 564 util.makedirs(dirname, self.createmode, notindexed)
565 565 return util.atomictempfile(f, mode, self.createmode,
566 566 checkambig=checkambig)
567 567 try:
568 568 if 'w' in mode:
569 569 util.unlink(f)
570 570 nlink = 0
571 571 else:
572 572 # nlinks() may behave differently for files on Windows
573 573 # shares if the file is open.
574 574 with util.posixfile(f):
575 575 nlink = util.nlinks(f)
576 576 if nlink < 1:
577 577 nlink = 2 # force mktempcopy (issue1922)
578 578 except (OSError, IOError) as e:
579 579 if e.errno != errno.ENOENT:
580 580 raise
581 581 nlink = 0
582 582 util.makedirs(dirname, self.createmode, notindexed)
583 583 if nlink > 0:
584 584 if self._trustnlink is None:
585 585 self._trustnlink = nlink > 1 or util.checknlink(f)
586 586 if nlink > 1 or not self._trustnlink:
587 587 util.rename(util.mktempcopy(f), f)
588 588 fp = util.posixfile(f, mode)
589 589 if nlink == 0:
590 590 self._fixfilemode(f)
591 591
592 592 if backgroundclose:
593 593 if not self._backgroundfilecloser:
594 594 raise error.Abort(_('backgroundclose can only be used when a '
595 595 'backgroundclosing context manager is active')
596 596 )
597 597
598 598 fp = delayclosedfile(fp, self._backgroundfilecloser)
599 599
600 600 return fp
601 601
602 602 def symlink(self, src, dst):
603 603 self.audit(dst)
604 604 linkname = self.join(dst)
605 605 try:
606 606 os.unlink(linkname)
607 607 except OSError:
608 608 pass
609 609
610 610 util.makedirs(os.path.dirname(linkname), self.createmode)
611 611
612 612 if self._cansymlink:
613 613 try:
614 614 os.symlink(src, linkname)
615 615 except OSError as err:
616 616 raise OSError(err.errno, _('could not symlink to %r: %s') %
617 617 (src, err.strerror), linkname)
618 618 else:
619 619 self.write(dst, src)
620 620
621 621 def join(self, path, *insidef):
622 622 if path:
623 623 return os.path.join(self.base, path, *insidef)
624 624 else:
625 625 return self.base
626 626
627 627 opener = vfs
628 628
629 629 class auditvfs(object):
630 630 def __init__(self, vfs):
631 631 self.vfs = vfs
632 632
633 633 @property
634 634 def mustaudit(self):
635 635 return self.vfs.mustaudit
636 636
637 637 @mustaudit.setter
638 638 def mustaudit(self, onoff):
639 639 self.vfs.mustaudit = onoff
640 640
641 641 class filtervfs(abstractvfs, auditvfs):
642 642 '''Wrapper vfs for filtering filenames with a function.'''
643 643
644 644 def __init__(self, vfs, filter):
645 645 auditvfs.__init__(self, vfs)
646 646 self._filter = filter
647 647
648 648 def __call__(self, path, *args, **kwargs):
649 649 return self.vfs(self._filter(path), *args, **kwargs)
650 650
651 651 def join(self, path, *insidef):
652 652 if path:
653 653 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
654 654 else:
655 655 return self.vfs.join(path)
656 656
657 657 filteropener = filtervfs
658 658
659 659 class readonlyvfs(abstractvfs, auditvfs):
660 660 '''Wrapper vfs preventing any writing.'''
661 661
662 662 def __init__(self, vfs):
663 663 auditvfs.__init__(self, vfs)
664 664
665 665 def __call__(self, path, mode='r', *args, **kw):
666 666 if mode not in ('r', 'rb'):
667 667 raise error.Abort(_('this vfs is read only'))
668 668 return self.vfs(path, mode, *args, **kw)
669 669
670 670 def join(self, path, *insidef):
671 671 return self.vfs.join(path, *insidef)
672 672
673 673 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
674 674 '''yield every hg repository under path, always recursively.
675 675 The recurse flag will only control recursion into repo working dirs'''
676 676 def errhandler(err):
677 677 if err.filename == path:
678 678 raise err
679 679 samestat = getattr(os.path, 'samestat', None)
680 680 if followsym and samestat is not None:
681 681 def adddir(dirlst, dirname):
682 682 match = False
683 683 dirstat = os.stat(dirname)
684 684 for lstdirstat in dirlst:
685 685 if samestat(dirstat, lstdirstat):
686 686 match = True
687 687 break
688 688 if not match:
689 689 dirlst.append(dirstat)
690 690 return not match
691 691 else:
692 692 followsym = False
693 693
694 694 if (seen_dirs is None) and followsym:
695 695 seen_dirs = []
696 696 adddir(seen_dirs, path)
697 697 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
698 698 dirs.sort()
699 699 if '.hg' in dirs:
700 700 yield root # found a repository
701 701 qroot = os.path.join(root, '.hg', 'patches')
702 702 if os.path.isdir(os.path.join(qroot, '.hg')):
703 703 yield qroot # we have a patch queue repo here
704 704 if recurse:
705 705 # avoid recursing inside the .hg directory
706 706 dirs.remove('.hg')
707 707 else:
708 708 dirs[:] = [] # don't descend further
709 709 elif followsym:
710 710 newdirs = []
711 711 for d in dirs:
712 712 fname = os.path.join(root, d)
713 713 if adddir(seen_dirs, fname):
714 714 if os.path.islink(fname):
715 715 for hgname in walkrepos(fname, True, seen_dirs):
716 716 yield hgname
717 717 else:
718 718 newdirs.append(d)
719 719 dirs[:] = newdirs
720 720
721 721 def osrcpath():
722 722 '''return default os-specific hgrc search path'''
723 723 path = []
724 724 defaultpath = os.path.join(util.datapath, 'default.d')
725 725 if os.path.isdir(defaultpath):
726 726 for f, kind in osutil.listdir(defaultpath):
727 727 if f.endswith('.rc'):
728 728 path.append(os.path.join(defaultpath, f))
729 729 path.extend(systemrcpath())
730 730 path.extend(userrcpath())
731 731 path = [os.path.normpath(f) for f in path]
732 732 return path
733 733
734 734 _rcpath = None
735 735
736 736 def rcpath():
737 737 '''return hgrc search path. if env var HGRCPATH is set, use it.
738 738 for each item in path, if directory, use files ending in .rc,
739 739 else use item.
740 740 make HGRCPATH empty to only look in .hg/hgrc of current repo.
741 741 if no HGRCPATH, use default os-specific path.'''
742 742 global _rcpath
743 743 if _rcpath is None:
744 744 if 'HGRCPATH' in os.environ:
745 745 _rcpath = []
746 746 for p in os.environ['HGRCPATH'].split(os.pathsep):
747 747 if not p:
748 748 continue
749 749 p = util.expandpath(p)
750 750 if os.path.isdir(p):
751 751 for f, kind in osutil.listdir(p):
752 752 if f.endswith('.rc'):
753 753 _rcpath.append(os.path.join(p, f))
754 754 else:
755 755 _rcpath.append(p)
756 756 else:
757 757 _rcpath = osrcpath()
758 758 return _rcpath
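
For example (a sketch with invented paths; note that the result is cached in `_rcpath`, so HGRCPATH must be set before the first call):

    import os
    os.environ['HGRCPATH'] = os.pathsep.join(
        ['/etc/hg/site.d', '/home/user/extra.rc'])
    print(rcpath())  # every *.rc inside /etc/hg/site.d, then /home/user/extra.rc
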
759 759
760 760 def intrev(rev):
761 761 """Return integer for a given revision that can be used in comparison or
762 762 arithmetic operation"""
763 763 if rev is None:
764 764 return wdirrev
765 765 return rev
766 766
767 767 def revsingle(repo, revspec, default='.'):
768 768 if not revspec and revspec != 0:
769 769 return repo[default]
770 770
771 771 l = revrange(repo, [revspec])
772 772 if not l:
773 773 raise error.Abort(_('empty revision set'))
774 774 return repo[l.last()]
775 775
776 776 def _pairspec(revspec):
777 777 tree = revset.parse(revspec)
778 778 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
779 779 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
780 780
781 781 def revpair(repo, revs):
782 782 if not revs:
783 783 return repo.dirstate.p1(), None
784 784
785 785 l = revrange(repo, revs)
786 786
787 787 if not l:
788 788 first = second = None
789 789 elif l.isascending():
790 790 first = l.min()
791 791 second = l.max()
792 792 elif l.isdescending():
793 793 first = l.max()
794 794 second = l.min()
795 795 else:
796 796 first = l.first()
797 797 second = l.last()
798 798
799 799 if first is None:
800 800 raise error.Abort(_('empty revision range'))
801 801 if (first == second and len(revs) >= 2
802 802 and not all(revrange(repo, [r]) for r in revs)):
803 803 raise error.Abort(_('empty revision on one side of range'))
804 804
805 805 # if top-level is range expression, the result must always be a pair
806 806 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
807 807 return repo.lookup(first), None
808 808
809 809 return repo.lookup(first), repo.lookup(second)
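
Both helpers resolve user-supplied revision specs; a brief sketch, assuming a loaded `repo`:

    ctx = revsingle(repo, '.^')                  # changectx for the last match
    node1, node2 = revpair(repo, ['1.0::tip'])   # two nodes for a range
    node1, node2 = revpair(repo, [])             # (working dir p1, None)
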
810 810
811 def revrange(repo, revs):
812 """Yield revision as strings from a list of revision specifications."""
811 def revrange(repo, specs):
812 """Execute 1 to many revsets and return the union.
813
814 This is the preferred mechanism for executing revsets using user-specified
815 config options, such as revset aliases.
816
817 The revsets specified by ``specs`` will be executed via a chained ``OR``
818 expression. If ``specs`` is empty, an empty result is returned.
819
820 ``specs`` can contain integers, in which case they are assumed to be
821 revision numbers.
822
823 It is assumed the revsets are already formatted. If you have arguments
824 that need to be expanded in the revset, call ``revset.formatspec()``
825 and pass the result as an element of ``specs``.
826
827 Specifying a single revset is allowed.
828
829 Returns a ``revset.abstractsmartset`` which is a list-like interface over
830 integer revisions.
831 """
813 832 allspecs = []
814 for spec in revs:
833 for spec in specs:
815 834 if isinstance(spec, int):
816 835 spec = revset.formatspec('rev(%d)', spec)
817 836 allspecs.append(spec)
818 837 m = revset.matchany(repo.ui, allspecs, repo)
819 838 return m(repo)
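
A usage sketch of the API documented above, assuming a loaded `repo`; note the `revset.formatspec()` step for arguments that need escaping:

    revs = revrange(repo, ['draft()', 'ancestors(tip)'])   # chained with OR
    for rev in revs:                  # the smartset yields integer revisions
        print(repo[rev].description())

    spec = revset.formatspec('branch(%s)', 'default')      # escape arguments
    revs = revrange(repo, [spec])
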
820 839
821 840 def meaningfulparents(repo, ctx):
822 841 """Return list of meaningful (or all if debug) parentrevs for rev.
823 842
824 843 For merges (two non-nullrev revisions) both parents are meaningful.
825 844 Otherwise the first parent revision is considered meaningful if it
826 845 is not the preceding revision.
827 846 """
828 847 parents = ctx.parents()
829 848 if len(parents) > 1:
830 849 return parents
831 850 if repo.ui.debugflag:
832 851 return [parents[0], repo['null']]
833 852 if parents[0].rev() >= intrev(ctx.rev()) - 1:
834 853 return []
835 854 return parents
836 855
837 856 def expandpats(pats):
838 857 '''Expand bare globs when running on windows.
839 858 On posix we assume it has already been done by sh.'''
840 859 if not util.expandglobs:
841 860 return list(pats)
842 861 ret = []
843 862 for kindpat in pats:
844 863 kind, pat = matchmod._patsplit(kindpat, None)
845 864 if kind is None:
846 865 try:
847 866 globbed = glob.glob(pat)
848 867 except re.error:
849 868 globbed = [pat]
850 869 if globbed:
851 870 ret.extend(globbed)
852 871 continue
853 872 ret.append(kindpat)
854 873 return ret
855 874
856 875 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
857 876 badfn=None):
858 877 '''Return a matcher and the patterns that were used.
859 878 The matcher will warn about bad matches, unless an alternate badfn callback
860 879 is provided.'''
861 880 if pats == ("",):
862 881 pats = []
863 882 if opts is None:
864 883 opts = {}
865 884 if not globbed and default == 'relpath':
866 885 pats = expandpats(pats or [])
867 886
868 887 def bad(f, msg):
869 888 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
870 889
871 890 if badfn is None:
872 891 badfn = bad
873 892
874 893 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
875 894 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
876 895
877 896 if m.always():
878 897 pats = []
879 898 return m, pats
880 899
881 900 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
882 901 badfn=None):
883 902 '''Return a matcher that will warn about bad matches.'''
884 903 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
885 904
886 905 def matchall(repo):
887 906 '''Return a matcher that will efficiently match everything.'''
888 907 return matchmod.always(repo.root, repo.getcwd())
889 908
890 909 def matchfiles(repo, files, badfn=None):
891 910 '''Return a matcher that will efficiently match exactly these files.'''
892 911 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
893 912
894 913 def origpath(ui, repo, filepath):
895 914 '''customize where .orig files are created
896 915
897 916 Fetch the user-defined path from the config file: [ui] origbackuppath = <path>
898 917 Fall back to the default of ``<filepath>.orig`` if not specified
899 918 '''
900 919 origbackuppath = ui.config('ui', 'origbackuppath', None)
901 920 if origbackuppath is None:
902 921 return filepath + ".orig"
903 922
904 923 filepathfromroot = os.path.relpath(filepath, start=repo.root)
905 924 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
906 925
907 926 origbackupdir = repo.vfs.dirname(fullorigpath)
908 927 if not repo.vfs.exists(origbackupdir):
909 928 ui.note(_('creating directory: %s\n') % origbackupdir)
910 929 util.makedirs(origbackupdir)
911 930
912 931 return fullorigpath + ".orig"
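
With, for example, `origbackuppath = .hg/origbackups` in the `[ui]` section, backups land under that directory instead of next to the original file; a sketch:

    backup = origpath(ui, repo, repo.wjoin('foo/bar.txt'))
    # -> <repo>/.hg/origbackups/foo/bar.txt.orig (directory created on demand)
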
913 932
914 933 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
915 934 if opts is None:
916 935 opts = {}
917 936 m = matcher
918 937 if dry_run is None:
919 938 dry_run = opts.get('dry_run')
920 939 if similarity is None:
921 940 similarity = float(opts.get('similarity') or 0)
922 941
923 942 ret = 0
924 943 join = lambda f: os.path.join(prefix, f)
925 944
926 945 def matchessubrepo(matcher, subpath):
927 946 if matcher.exact(subpath):
928 947 return True
929 948 for f in matcher.files():
930 949 if f.startswith(subpath):
931 950 return True
932 951 return False
933 952
934 953 wctx = repo[None]
935 954 for subpath in sorted(wctx.substate):
936 955 if opts.get('subrepos') or matchessubrepo(m, subpath):
937 956 sub = wctx.sub(subpath)
938 957 try:
939 958 submatch = matchmod.subdirmatcher(subpath, m)
940 959 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
941 960 ret = 1
942 961 except error.LookupError:
943 962 repo.ui.status(_("skipping missing subrepository: %s\n")
944 963 % join(subpath))
945 964
946 965 rejected = []
947 966 def badfn(f, msg):
948 967 if f in m.files():
949 968 m.bad(f, msg)
950 969 rejected.append(f)
951 970
952 971 badmatch = matchmod.badmatch(m, badfn)
953 972 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
954 973 badmatch)
955 974
956 975 unknownset = set(unknown + forgotten)
957 976 toprint = unknownset.copy()
958 977 toprint.update(deleted)
959 978 for abs in sorted(toprint):
960 979 if repo.ui.verbose or not m.exact(abs):
961 980 if abs in unknownset:
962 981 status = _('adding %s\n') % m.uipath(abs)
963 982 else:
964 983 status = _('removing %s\n') % m.uipath(abs)
965 984 repo.ui.status(status)
966 985
967 986 renames = _findrenames(repo, m, added + unknown, removed + deleted,
968 987 similarity)
969 988
970 989 if not dry_run:
971 990 _markchanges(repo, unknown + forgotten, deleted, renames)
972 991
973 992 for f in rejected:
974 993 if f in m.files():
975 994 return 1
976 995 return ret
977 996
978 997 def marktouched(repo, files, similarity=0.0):
979 998 '''Assert that files have somehow been operated upon. Files are relative to
980 999 the repo root.'''
981 1000 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
982 1001 rejected = []
983 1002
984 1003 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
985 1004
986 1005 if repo.ui.verbose:
987 1006 unknownset = set(unknown + forgotten)
988 1007 toprint = unknownset.copy()
989 1008 toprint.update(deleted)
990 1009 for abs in sorted(toprint):
991 1010 if abs in unknownset:
992 1011 status = _('adding %s\n') % abs
993 1012 else:
994 1013 status = _('removing %s\n') % abs
995 1014 repo.ui.status(status)
996 1015
997 1016 renames = _findrenames(repo, m, added + unknown, removed + deleted,
998 1017 similarity)
999 1018
1000 1019 _markchanges(repo, unknown + forgotten, deleted, renames)
1001 1020
1002 1021 for f in rejected:
1003 1022 if f in m.files():
1004 1023 return 1
1005 1024 return 0
1006 1025
1007 1026 def _interestingfiles(repo, matcher):
1008 1027 '''Walk dirstate with matcher, looking for files that addremove would care
1009 1028 about.
1010 1029
1011 1030 This is different from dirstate.status because it doesn't care about
1012 1031 whether files are modified or clean.'''
1013 1032 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1014 1033 audit_path = pathutil.pathauditor(repo.root)
1015 1034
1016 1035 ctx = repo[None]
1017 1036 dirstate = repo.dirstate
1018 1037 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1019 1038 full=False)
1020 1039 for abs, st in walkresults.iteritems():
1021 1040 dstate = dirstate[abs]
1022 1041 if dstate == '?' and audit_path.check(abs):
1023 1042 unknown.append(abs)
1024 1043 elif dstate != 'r' and not st:
1025 1044 deleted.append(abs)
1026 1045 elif dstate == 'r' and st:
1027 1046 forgotten.append(abs)
1028 1047 # for finding renames
1029 1048 elif dstate == 'r' and not st:
1030 1049 removed.append(abs)
1031 1050 elif dstate == 'a':
1032 1051 added.append(abs)
1033 1052
1034 1053 return added, unknown, deleted, removed, forgotten
1035 1054
1036 1055 def _findrenames(repo, matcher, added, removed, similarity):
1037 1056 '''Find renames from removed files to added ones.'''
1038 1057 renames = {}
1039 1058 if similarity > 0:
1040 1059 for old, new, score in similar.findrenames(repo, added, removed,
1041 1060 similarity):
1042 1061 if (repo.ui.verbose or not matcher.exact(old)
1043 1062 or not matcher.exact(new)):
1044 1063 repo.ui.status(_('recording removal of %s as rename to %s '
1045 1064 '(%d%% similar)\n') %
1046 1065 (matcher.rel(old), matcher.rel(new),
1047 1066 score * 100))
1048 1067 renames[new] = old
1049 1068 return renames
1050 1069
1051 1070 def _markchanges(repo, unknown, deleted, renames):
1052 1071 '''Marks the files in unknown as added, the files in deleted as removed,
1053 1072 and the files in renames as copied.'''
1054 1073 wctx = repo[None]
1055 1074 with repo.wlock():
1056 1075 wctx.forget(deleted)
1057 1076 wctx.add(unknown)
1058 1077 for new, old in renames.iteritems():
1059 1078 wctx.copy(old, new)
1060 1079
1061 1080 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1062 1081 """Update the dirstate to reflect the intent of copying src to dst. For
1063 1082 different reasons it might not end with dst being marked as copied from src.
1064 1083 """
1065 1084 origsrc = repo.dirstate.copied(src) or src
1066 1085 if dst == origsrc: # copying back a copy?
1067 1086 if repo.dirstate[dst] not in 'mn' and not dryrun:
1068 1087 repo.dirstate.normallookup(dst)
1069 1088 else:
1070 1089 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1071 1090 if not ui.quiet:
1072 1091 ui.warn(_("%s has not been committed yet, so no copy "
1073 1092 "data will be stored for %s.\n")
1074 1093 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1075 1094 if repo.dirstate[dst] in '?r' and not dryrun:
1076 1095 wctx.add([dst])
1077 1096 elif not dryrun:
1078 1097 wctx.copy(origsrc, dst)
1079 1098
1080 1099 def readrequires(opener, supported):
1081 1100 '''Reads and parses .hg/requires and checks if all entries found
1082 1101 are in the list of supported features.'''
1083 1102 requirements = set(opener.read("requires").splitlines())
1084 1103 missings = []
1085 1104 for r in requirements:
1086 1105 if r not in supported:
1087 1106 if not r or not r[0].isalnum():
1088 1107 raise error.RequirementError(_(".hg/requires file is corrupt"))
1089 1108 missings.append(r)
1090 1109 missings.sort()
1091 1110 if missings:
1092 1111 raise error.RequirementError(
1093 1112 _("repository requires features unknown to this Mercurial: %s")
1094 1113 % " ".join(missings),
1095 1114 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1096 1115 " for more information"))
1097 1116 return requirements
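
`.hg/requires` holds one feature name per line (the names produced by `newreporequirements` earlier, e.g. `revlogv1`, `store`, `fncache`). A sketch of checking it, assuming `repo.vfs` is rooted at `.hg`:

    supported = set(['revlogv1', 'store', 'fncache', 'dotencode',
                     'generaldelta'])
    requirements = readrequires(repo.vfs, supported)
    # raises error.RequirementError on unknown or corrupt entries
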
1098 1117
1099 1118 def writerequires(opener, requirements):
1100 1119 with opener('requires', 'w') as fp:
1101 1120 for r in sorted(requirements):
1102 1121 fp.write("%s\n" % r)
1103 1122
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fall back to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified the file between the time we read and stat it
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

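# Illustrative sketch of a filecache consumer ('_examplerepo' is hypothetical,
# not part of Mercurial). Instances must carry a _filecache dict and a join()
# method; the decorated method is then recomputed only when the stat info of
# the tracked file changes.
class _examplerepo(object):
    def __init__(self, root):
        self._filecache = {}
        self._root = root

    def join(self, fname):
        # called by filecache.join() to resolve tracked paths
        return os.path.join(self._root, '.hg', fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # re-run whenever .hg/bookmarks is replaced or appended to
        return util.readfile(self.join('bookmarks'))
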
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

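# Illustrative sketch: running a child hg process while holding the wlock.
# The child sees HG_WLOCK_LOCKER in its environment and can take over the
# lock instead of deadlocking; the command string is an example only.
def _examplewlocksub(repo):
    with repo.wlock():
        # returns the subprocess's exit code, as ui.system does
        return wlocksub(repo, 'hg -R . status')
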
def gdinitconfig(ui):
    """helper function to know whether a repo should be created as generaldelta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta', False)
            or ui.configbool('format', 'usegeneraldelta', True))

def gddeltaconfig(ui):
    """helper function to know whether incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)

class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)

class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow it to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count.)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than the lifetime of the context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch errors here, or the thread will terminate
                # and we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash the exception so we can re-raise it from the
                    # main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
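
# Illustrative sketch: writing many files and letting worker threads absorb
# the close() cost. 'paths' and 'data' are hypothetical; in real use the vfs
# layer hands out delayclosedfile proxies so callers just call close().
def _examplebackgroundclose(ui, paths, data):
    with backgroundfilecloser(ui, expectedcount=len(paths)) as closer:
        for p in paths:
            fh = open(p, 'wb')
            fh.write(data)
            # queued for a worker thread (or closed synchronously if the
            # closer decided not to start threads)
            closer.close(fh)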