match: remove ispartial()...
Martin von Zweigbergk -
r32312:0d6b3572 default
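The hunk near the end of this diff replaces the last caller of match.ispartial() with the equivalent check `not match.always()`. As a rough illustration of why the two are interchangeable, here is a minimal sketch of the matcher predicate involved; the basematcher class and method bodies below are simplified assumptions for illustration, not the actual mercurial.match code:

    # minimal sketch, assuming ispartial() was simply the negation of always()
    class basematcher(object):
        def always(self):
            # True only for a matcher that matches every file
            return False
        def ispartial(self):
            # redundant helper; this changeset drops it and its callers
            return not self.always()

    m = basematcher()
    assert m.ispartial() == (not m.always())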
@@ -1,2048 +1,2048 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repoview,
54 54 revset,
55 55 revsetlang,
56 56 scmutil,
57 57 store,
58 58 subrepo,
59 59 tags as tagsmod,
60 60 transaction,
61 61 txnutil,
62 62 util,
63 63 vfs as vfsmod,
64 64 )
65 65
66 66 release = lockmod.release
67 67 urlerr = util.urlerr
68 68 urlreq = util.urlreq
69 69
70 70 class repofilecache(scmutil.filecache):
71 71 """All filecache usage on repo are done for logic that should be unfiltered
72 72 """
73 73
74 74 def join(self, obj, fname):
75 75 return obj.vfs.join(fname)
76 76 def __get__(self, repo, type=None):
77 77 if repo is None:
78 78 return self
79 79 return super(repofilecache, self).__get__(repo.unfiltered(), type)
80 80 def __set__(self, repo, value):
81 81 return super(repofilecache, self).__set__(repo.unfiltered(), value)
82 82 def __delete__(self, repo):
83 83 return super(repofilecache, self).__delete__(repo.unfiltered())
84 84
85 85 class storecache(repofilecache):
86 86 """filecache for files in the store"""
87 87 def join(self, obj, fname):
88 88 return obj.sjoin(fname)
89 89
90 90 class unfilteredpropertycache(util.propertycache):
91 91 """propertycache that apply to unfiltered repo only"""
92 92
93 93 def __get__(self, repo, type=None):
94 94 unfi = repo.unfiltered()
95 95 if unfi is repo:
96 96 return super(unfilteredpropertycache, self).__get__(unfi)
97 97 return getattr(unfi, self.name)
98 98
99 99 class filteredpropertycache(util.propertycache):
100 100 """propertycache that must take filtering in account"""
101 101
102 102 def cachevalue(self, obj, value):
103 103 object.__setattr__(obj, self.name, value)
104 104
105 105
106 106 def hasunfilteredcache(repo, name):
107 107 """check if a repo has an unfilteredpropertycache value for <name>"""
108 108 return name in vars(repo.unfiltered())
109 109
110 110 def unfilteredmethod(orig):
111 111 """decorate method that always need to be run on unfiltered version"""
112 112 def wrapper(repo, *args, **kwargs):
113 113 return orig(repo.unfiltered(), *args, **kwargs)
114 114 return wrapper
115 115
116 116 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
117 117 'unbundle'}
118 118 legacycaps = moderncaps.union({'changegroupsubset'})
119 119
120 120 class localpeer(peer.peerrepository):
121 121 '''peer for a local repo; reflects only the most recent API'''
122 122
123 123 def __init__(self, repo, caps=None):
124 124 if caps is None:
125 125 caps = moderncaps.copy()
126 126 peer.peerrepository.__init__(self)
127 127 self._repo = repo.filtered('served')
128 128 self.ui = repo.ui
129 129 self._caps = repo._restrictcapabilities(caps)
130 130 self.requirements = repo.requirements
131 131 self.supportedformats = repo.supportedformats
132 132
133 133 def close(self):
134 134 self._repo.close()
135 135
136 136 def _capabilities(self):
137 137 return self._caps
138 138
139 139 def local(self):
140 140 return self._repo
141 141
142 142 def canpush(self):
143 143 return True
144 144
145 145 def url(self):
146 146 return self._repo.url()
147 147
148 148 def lookup(self, key):
149 149 return self._repo.lookup(key)
150 150
151 151 def branchmap(self):
152 152 return self._repo.branchmap()
153 153
154 154 def heads(self):
155 155 return self._repo.heads()
156 156
157 157 def known(self, nodes):
158 158 return self._repo.known(nodes)
159 159
160 160 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
161 161 **kwargs):
162 162 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
163 163 common=common, bundlecaps=bundlecaps,
164 164 **kwargs)
165 165 cb = util.chunkbuffer(chunks)
166 166
167 167 if exchange.bundle2requested(bundlecaps):
168 168 # When requesting a bundle2, getbundle returns a stream to make the
169 169 # wire level function happier. We need to build a proper object
170 170 # from it in local peer.
171 171 return bundle2.getunbundler(self.ui, cb)
172 172 else:
173 173 return changegroup.getunbundler('01', cb, None)
174 174
175 175 # TODO We might want to move the next two calls into legacypeer and add
176 176 # unbundle instead.
177 177
178 178 def unbundle(self, cg, heads, url):
179 179 """apply a bundle on a repo
180 180
181 181 This function handles the repo locking itself."""
182 182 try:
183 183 try:
184 184 cg = exchange.readbundle(self.ui, cg, None)
185 185 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
186 186 if util.safehasattr(ret, 'getchunks'):
187 187 # This is a bundle20 object, turn it into an unbundler.
188 188 # This little dance should be dropped eventually when the
189 189 # API is finally improved.
190 190 stream = util.chunkbuffer(ret.getchunks())
191 191 ret = bundle2.getunbundler(self.ui, stream)
192 192 return ret
193 193 except Exception as exc:
194 194 # If the exception contains output salvaged from a bundle2
195 195 # reply, we need to make sure it is printed before continuing
196 196 # to fail. So we build a bundle2 with such output and consume
197 197 # it directly.
198 198 #
199 199 # This is not very elegant but allows a "simple" solution for
200 200 # issue4594
201 201 output = getattr(exc, '_bundle2salvagedoutput', ())
202 202 if output:
203 203 bundler = bundle2.bundle20(self._repo.ui)
204 204 for out in output:
205 205 bundler.addpart(out)
206 206 stream = util.chunkbuffer(bundler.getchunks())
207 207 b = bundle2.getunbundler(self.ui, stream)
208 208 bundle2.processbundle(self._repo, b)
209 209 raise
210 210 except error.PushRaced as exc:
211 211 raise error.ResponseError(_('push failed:'), str(exc))
212 212
213 213 def lock(self):
214 214 return self._repo.lock()
215 215
216 216 def addchangegroup(self, cg, source, url):
217 217 return cg.apply(self._repo, source, url)
218 218
219 219 def pushkey(self, namespace, key, old, new):
220 220 return self._repo.pushkey(namespace, key, old, new)
221 221
222 222 def listkeys(self, namespace):
223 223 return self._repo.listkeys(namespace)
224 224
225 225 def debugwireargs(self, one, two, three=None, four=None, five=None):
226 226 '''used to test argument passing over the wire'''
227 227 return "%s %s %s %s %s" % (one, two, three, four, five)
228 228
229 229 class locallegacypeer(localpeer):
230 230 '''peer extension which implements legacy methods too; used for tests with
231 231 restricted capabilities'''
232 232
233 233 def __init__(self, repo):
234 234 localpeer.__init__(self, repo, caps=legacycaps)
235 235
236 236 def branches(self, nodes):
237 237 return self._repo.branches(nodes)
238 238
239 239 def between(self, pairs):
240 240 return self._repo.between(pairs)
241 241
242 242 def changegroup(self, basenodes, source):
243 243 return changegroup.changegroup(self._repo, basenodes, source)
244 244
245 245 def changegroupsubset(self, bases, heads, source):
246 246 return changegroup.changegroupsubset(self._repo, bases, heads, source)
247 247
248 248 class localrepository(object):
249 249
250 250 supportedformats = {'revlogv1', 'generaldelta', 'treemanifest',
251 251 'manifestv2'}
252 252 _basesupported = supportedformats | {'store', 'fncache', 'shared',
253 253 'relshared', 'dotencode'}
254 254 openerreqs = {'revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'}
255 255 filtername = None
256 256
257 257 # a list of (ui, featureset) functions.
258 258 # only functions defined in module of enabled extensions are invoked
259 259 featuresetupfuncs = set()
260 260
261 261 def __init__(self, baseui, path, create=False):
262 262 self.requirements = set()
263 263 # wvfs: rooted at the repository root, used to access the working copy
264 264 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
265 265 # vfs: rooted at .hg, used to access repo files outside of .hg/store
266 266 self.vfs = None
267 267 # svfs: usually rooted at .hg/store, used to access repository history
268 268 # If this is a shared repository, this vfs may point to another
269 269 # repository's .hg/store directory.
270 270 self.svfs = None
271 271 self.root = self.wvfs.base
272 272 self.path = self.wvfs.join(".hg")
273 273 self.origroot = path
274 274 self.auditor = pathutil.pathauditor(self.root, self._checknested)
275 275 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
276 276 realfs=False)
277 277 self.vfs = vfsmod.vfs(self.path)
278 278 self.baseui = baseui
279 279 self.ui = baseui.copy()
280 280 self.ui.copy = baseui.copy # prevent copying repo configuration
281 281 # A list of callbacks to shape the phase if no data were found.
282 282 # Callbacks are in the form: func(repo, roots) --> processed root.
283 283 # This list is to be filled by extensions during repo setup.
284 284 self._phasedefaults = []
285 285 try:
286 286 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
287 287 self._loadextensions()
288 288 except IOError:
289 289 pass
290 290
291 291 if self.featuresetupfuncs:
292 292 self.supported = set(self._basesupported) # use private copy
293 293 extmods = set(m.__name__ for n, m
294 294 in extensions.extensions(self.ui))
295 295 for setupfunc in self.featuresetupfuncs:
296 296 if setupfunc.__module__ in extmods:
297 297 setupfunc(self.ui, self.supported)
298 298 else:
299 299 self.supported = self._basesupported
300 300 color.setup(self.ui)
301 301
302 302 # Add compression engines.
303 303 for name in util.compengines:
304 304 engine = util.compengines[name]
305 305 if engine.revlogheader():
306 306 self.supported.add('exp-compression-%s' % name)
307 307
308 308 if not self.vfs.isdir():
309 309 if create:
310 310 self.requirements = newreporequirements(self)
311 311
312 312 if not self.wvfs.exists():
313 313 self.wvfs.makedirs()
314 314 self.vfs.makedir(notindexed=True)
315 315
316 316 if 'store' in self.requirements:
317 317 self.vfs.mkdir("store")
318 318
319 319 # create an invalid changelog
320 320 self.vfs.append(
321 321 "00changelog.i",
322 322 '\0\0\0\2' # represents revlogv2
323 323 ' dummy changelog to prevent using the old repo layout'
324 324 )
325 325 else:
326 326 raise error.RepoError(_("repository %s not found") % path)
327 327 elif create:
328 328 raise error.RepoError(_("repository %s already exists") % path)
329 329 else:
330 330 try:
331 331 self.requirements = scmutil.readrequires(
332 332 self.vfs, self.supported)
333 333 except IOError as inst:
334 334 if inst.errno != errno.ENOENT:
335 335 raise
336 336
337 337 self.sharedpath = self.path
338 338 try:
339 339 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
340 340 if 'relshared' in self.requirements:
341 341 sharedpath = self.vfs.join(sharedpath)
342 342 vfs = vfsmod.vfs(sharedpath, realpath=True)
343 343 s = vfs.base
344 344 if not vfs.exists():
345 345 raise error.RepoError(
346 346 _('.hg/sharedpath points to nonexistent directory %s') % s)
347 347 self.sharedpath = s
348 348 except IOError as inst:
349 349 if inst.errno != errno.ENOENT:
350 350 raise
351 351
352 352 self.store = store.store(
353 353 self.requirements, self.sharedpath, vfsmod.vfs)
354 354 self.spath = self.store.path
355 355 self.svfs = self.store.vfs
356 356 self.sjoin = self.store.join
357 357 self.vfs.createmode = self.store.createmode
358 358 self._applyopenerreqs()
359 359 if create:
360 360 self._writerequirements()
361 361
362 362 self._dirstatevalidatewarned = False
363 363
364 364 self._branchcaches = {}
365 365 self._revbranchcache = None
366 366 self.filterpats = {}
367 367 self._datafilters = {}
368 368 self._transref = self._lockref = self._wlockref = None
369 369
370 370 # A cache for various files under .hg/ that tracks file changes,
371 371 # (used by the filecache decorator)
372 372 #
373 373 # Maps a property name to its util.filecacheentry
374 374 self._filecache = {}
375 375
376 376 # holds sets of revisions to be filtered
377 377 # should be cleared when something might have changed the filter value:
378 378 # - new changesets,
379 379 # - phase change,
380 380 # - new obsolescence marker,
381 381 # - working directory parent change,
382 382 # - bookmark changes
383 383 self.filteredrevcache = {}
384 384
385 385 # generic mapping between names and nodes
386 386 self.names = namespaces.namespaces()
387 387
388 388 def close(self):
389 389 self._writecaches()
390 390
391 391 def _loadextensions(self):
392 392 extensions.loadall(self.ui)
393 393
394 394 def _writecaches(self):
395 395 if self._revbranchcache:
396 396 self._revbranchcache.write()
397 397
398 398 def _restrictcapabilities(self, caps):
399 399 if self.ui.configbool('experimental', 'bundle2-advertise', True):
400 400 caps = set(caps)
401 401 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
402 402 caps.add('bundle2=' + urlreq.quote(capsblob))
403 403 return caps
404 404
405 405 def _applyopenerreqs(self):
406 406 self.svfs.options = dict((r, 1) for r in self.requirements
407 407 if r in self.openerreqs)
408 408 # experimental config: format.chunkcachesize
409 409 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
410 410 if chunkcachesize is not None:
411 411 self.svfs.options['chunkcachesize'] = chunkcachesize
412 412 # experimental config: format.maxchainlen
413 413 maxchainlen = self.ui.configint('format', 'maxchainlen')
414 414 if maxchainlen is not None:
415 415 self.svfs.options['maxchainlen'] = maxchainlen
416 416 # experimental config: format.manifestcachesize
417 417 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
418 418 if manifestcachesize is not None:
419 419 self.svfs.options['manifestcachesize'] = manifestcachesize
420 420 # experimental config: format.aggressivemergedeltas
421 421 aggressivemergedeltas = self.ui.configbool('format',
422 422 'aggressivemergedeltas', False)
423 423 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
424 424 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
425 425
426 426 for r in self.requirements:
427 427 if r.startswith('exp-compression-'):
428 428 self.svfs.options['compengine'] = r[len('exp-compression-'):]
429 429
430 430 def _writerequirements(self):
431 431 scmutil.writerequires(self.vfs, self.requirements)
432 432
433 433 def _checknested(self, path):
434 434 """Determine if path is a legal nested repository."""
435 435 if not path.startswith(self.root):
436 436 return False
437 437 subpath = path[len(self.root) + 1:]
438 438 normsubpath = util.pconvert(subpath)
439 439
440 440 # XXX: Checking against the current working copy is wrong in
441 441 # the sense that it can reject things like
442 442 #
443 443 # $ hg cat -r 10 sub/x.txt
444 444 #
445 445 # if sub/ is no longer a subrepository in the working copy
446 446 # parent revision.
447 447 #
448 448 # However, it can of course also allow things that would have
449 449 # been rejected before, such as the above cat command if sub/
450 450 # is a subrepository now, but was a normal directory before.
451 451 # The old path auditor would have rejected by mistake since it
452 452 # panics when it sees sub/.hg/.
453 453 #
454 454 # All in all, checking against the working copy seems sensible
455 455 # since we want to prevent access to nested repositories on
456 456 # the filesystem *now*.
457 457 ctx = self[None]
458 458 parts = util.splitpath(subpath)
459 459 while parts:
460 460 prefix = '/'.join(parts)
461 461 if prefix in ctx.substate:
462 462 if prefix == normsubpath:
463 463 return True
464 464 else:
465 465 sub = ctx.sub(prefix)
466 466 return sub.checknested(subpath[len(prefix) + 1:])
467 467 else:
468 468 parts.pop()
469 469 return False
470 470
471 471 def peer(self):
472 472 return localpeer(self) # not cached to avoid reference cycle
473 473
474 474 def unfiltered(self):
475 475 """Return unfiltered version of the repository
476 476
477 477 Intended to be overwritten by filtered repo."""
478 478 return self
479 479
480 480 def filtered(self, name):
481 481 """Return a filtered version of a repository"""
482 482 # build a new class with the mixin and the current class
483 483 # (possibly subclass of the repo)
484 484 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
485 485 pass
486 486 return filteredrepo(self, name)
487 487
488 488 @repofilecache('bookmarks', 'bookmarks.current')
489 489 def _bookmarks(self):
490 490 return bookmarks.bmstore(self)
491 491
492 492 @property
493 493 def _activebookmark(self):
494 494 return self._bookmarks.active
495 495
496 496 def bookmarkheads(self, bookmark):
497 497 name = bookmark.split('@', 1)[0]
498 498 heads = []
499 499 for mark, n in self._bookmarks.iteritems():
500 500 if mark.split('@', 1)[0] == name:
501 501 heads.append(n)
502 502 return heads
503 503
504 504 # _phaserevs and _phasesets depend on changelog. what we need is to
505 505 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
506 506 # can't be easily expressed in filecache mechanism.
507 507 @storecache('phaseroots', '00changelog.i')
508 508 def _phasecache(self):
509 509 return phases.phasecache(self, self._phasedefaults)
510 510
511 511 @storecache('obsstore')
512 512 def obsstore(self):
513 513 # read default format for new obsstore.
514 514 # developer config: format.obsstore-version
515 515 defaultformat = self.ui.configint('format', 'obsstore-version', None)
516 516 # rely on obsstore class default when possible.
517 517 kwargs = {}
518 518 if defaultformat is not None:
519 519 kwargs['defaultformat'] = defaultformat
520 520 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
521 521 store = obsolete.obsstore(self.svfs, readonly=readonly,
522 522 **kwargs)
523 523 if store and readonly:
524 524 self.ui.warn(
525 525 _('obsolete feature not enabled but %i markers found!\n')
526 526 % len(list(store)))
527 527 return store
528 528
529 529 @storecache('00changelog.i')
530 530 def changelog(self):
531 531 return changelog.changelog(self.svfs,
532 532 trypending=txnutil.mayhavepending(self.root))
533 533
534 534 def _constructmanifest(self):
535 535 # This is a temporary function while we migrate from manifest to
536 536 # manifestlog. It allows bundlerepo and unionrepo to intercept the
537 537 # manifest creation.
538 538 return manifest.manifestrevlog(self.svfs)
539 539
540 540 @storecache('00manifest.i')
541 541 def manifestlog(self):
542 542 return manifest.manifestlog(self.svfs, self)
543 543
544 544 @repofilecache('dirstate')
545 545 def dirstate(self):
546 546 return dirstate.dirstate(self.vfs, self.ui, self.root,
547 547 self._dirstatevalidate)
548 548
549 549 def _dirstatevalidate(self, node):
550 550 try:
551 551 self.changelog.rev(node)
552 552 return node
553 553 except error.LookupError:
554 554 if not self._dirstatevalidatewarned:
555 555 self._dirstatevalidatewarned = True
556 556 self.ui.warn(_("warning: ignoring unknown"
557 557 " working parent %s!\n") % short(node))
558 558 return nullid
559 559
560 560 def __getitem__(self, changeid):
561 561 if changeid is None or changeid == wdirrev:
562 562 return context.workingctx(self)
563 563 if isinstance(changeid, slice):
564 564 return [context.changectx(self, i)
565 565 for i in xrange(*changeid.indices(len(self)))
566 566 if i not in self.changelog.filteredrevs]
567 567 return context.changectx(self, changeid)
568 568
569 569 def __contains__(self, changeid):
570 570 try:
571 571 self[changeid]
572 572 return True
573 573 except error.RepoLookupError:
574 574 return False
575 575
576 576 def __nonzero__(self):
577 577 return True
578 578
579 579 __bool__ = __nonzero__
580 580
581 581 def __len__(self):
582 582 return len(self.changelog)
583 583
584 584 def __iter__(self):
585 585 return iter(self.changelog)
586 586
587 587 def revs(self, expr, *args):
588 588 '''Find revisions matching a revset.
589 589
590 590 The revset is specified as a string ``expr`` that may contain
591 591 %-formatting to escape certain types. See ``revsetlang.formatspec``.
592 592
593 593 Revset aliases from the configuration are not expanded. To expand
594 594 user aliases, consider calling ``scmutil.revrange()`` or
595 595 ``repo.anyrevs([expr], user=True)``.
596 596
597 597 Returns a revset.abstractsmartset, which is a list-like interface
598 598 that contains integer revisions.
599 599 '''
600 600 expr = revsetlang.formatspec(expr, *args)
601 601 m = revset.match(None, expr)
602 602 return m(self)
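# Hypothetical usage sketch for revs() (not part of localrepo.py): the
# %-formatting in ``expr`` is expanded by revsetlang.formatspec, e.g.
#
#     revs = repo.revs('heads(%d::)', 42)          # %d expands an integer rev
#     revs = repo.revs('%ln and draft()', nodes)   # %ln expands a list of nodes
#
# Revset aliases are not expanded here; use scmutil.revrange() for
# user-supplied expressions, as the docstring above notes.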
603 603
604 604 def set(self, expr, *args):
605 605 '''Find revisions matching a revset and emit changectx instances.
606 606
607 607 This is a convenience wrapper around ``revs()`` that iterates the
608 608 result and is a generator of changectx instances.
609 609
610 610 Revset aliases from the configuration are not expanded. To expand
611 611 user aliases, consider calling ``scmutil.revrange()``.
612 612 '''
613 613 for r in self.revs(expr, *args):
614 614 yield self[r]
615 615
616 616 def anyrevs(self, specs, user=False):
617 617 '''Find revisions matching one of the given revsets.
618 618
619 619 Revset aliases from the configuration are not expanded by default. To
620 620 expand user aliases, specify ``user=True``.
621 621 '''
622 622 if user:
623 623 m = revset.matchany(self.ui, specs, repo=self)
624 624 else:
625 625 m = revset.matchany(None, specs)
626 626 return m(self)
627 627
628 628 def url(self):
629 629 return 'file:' + self.root
630 630
631 631 def hook(self, name, throw=False, **args):
632 632 """Call a hook, passing this repo instance.
633 633
634 634 This is a convenience method to aid invoking hooks. Extensions likely
635 635 won't call this unless they have registered a custom hook or are
636 636 replacing code that is expected to call a hook.
637 637 """
638 638 return hook.hook(self.ui, self, name, throw, **args)
639 639
640 640 @filteredpropertycache
641 641 def _tagscache(self):
642 642 '''Returns a tagscache object that contains various tag-related
643 643 caches.'''
644 644
645 645 # This simplifies its cache management by having one decorated
646 646 # function (this one) and the rest simply fetch things from it.
647 647 class tagscache(object):
648 648 def __init__(self):
649 649 # These two define the set of tags for this repository. tags
650 650 # maps tag name to node; tagtypes maps tag name to 'global' or
651 651 # 'local'. (Global tags are defined by .hgtags across all
652 652 # heads, and local tags are defined in .hg/localtags.)
653 653 # They constitute the in-memory cache of tags.
654 654 self.tags = self.tagtypes = None
655 655
656 656 self.nodetagscache = self.tagslist = None
657 657
658 658 cache = tagscache()
659 659 cache.tags, cache.tagtypes = self._findtags()
660 660
661 661 return cache
662 662
663 663 def tags(self):
664 664 '''return a mapping of tag to node'''
665 665 t = {}
666 666 if self.changelog.filteredrevs:
667 667 tags, tt = self._findtags()
668 668 else:
669 669 tags = self._tagscache.tags
670 670 for k, v in tags.iteritems():
671 671 try:
672 672 # ignore tags to unknown nodes
673 673 self.changelog.rev(v)
674 674 t[k] = v
675 675 except (error.LookupError, ValueError):
676 676 pass
677 677 return t
678 678
679 679 def _findtags(self):
680 680 '''Do the hard work of finding tags. Return a pair of dicts
681 681 (tags, tagtypes) where tags maps tag name to node, and tagtypes
682 682 maps tag name to a string like \'global\' or \'local\'.
683 683 Subclasses or extensions are free to add their own tags, but
684 684 should be aware that the returned dicts will be retained for the
685 685 duration of the localrepo object.'''
686 686
687 687 # XXX what tagtype should subclasses/extensions use? Currently
688 688 # mq and bookmarks add tags, but do not set the tagtype at all.
689 689 # Should each extension invent its own tag type? Should there
690 690 # be one tagtype for all such "virtual" tags? Or is the status
691 691 # quo fine?
692 692
693 693
694 694 # map tag name to (node, hist)
695 695 alltags = tagsmod.findglobaltags(self.ui, self)
696 696 # map tag name to tag type
697 697 tagtypes = dict((tag, 'global') for tag in alltags)
698 698
699 699 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
700 700
701 701 # Build the return dicts. Have to re-encode tag names because
702 702 # the tags module always uses UTF-8 (in order not to lose info
703 703 # writing to the cache), but the rest of Mercurial wants them in
704 704 # local encoding.
705 705 tags = {}
706 706 for (name, (node, hist)) in alltags.iteritems():
707 707 if node != nullid:
708 708 tags[encoding.tolocal(name)] = node
709 709 tags['tip'] = self.changelog.tip()
710 710 tagtypes = dict([(encoding.tolocal(name), value)
711 711 for (name, value) in tagtypes.iteritems()])
712 712 return (tags, tagtypes)
713 713
714 714 def tagtype(self, tagname):
715 715 '''
716 716 return the type of the given tag. result can be:
717 717
718 718 'local' : a local tag
719 719 'global' : a global tag
720 720 None : tag does not exist
721 721 '''
722 722
723 723 return self._tagscache.tagtypes.get(tagname)
724 724
725 725 def tagslist(self):
726 726 '''return a list of tags ordered by revision'''
727 727 if not self._tagscache.tagslist:
728 728 l = []
729 729 for t, n in self.tags().iteritems():
730 730 l.append((self.changelog.rev(n), t, n))
731 731 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
732 732
733 733 return self._tagscache.tagslist
734 734
735 735 def nodetags(self, node):
736 736 '''return the tags associated with a node'''
737 737 if not self._tagscache.nodetagscache:
738 738 nodetagscache = {}
739 739 for t, n in self._tagscache.tags.iteritems():
740 740 nodetagscache.setdefault(n, []).append(t)
741 741 for tags in nodetagscache.itervalues():
742 742 tags.sort()
743 743 self._tagscache.nodetagscache = nodetagscache
744 744 return self._tagscache.nodetagscache.get(node, [])
745 745
746 746 def nodebookmarks(self, node):
747 747 """return the list of bookmarks pointing to the specified node"""
748 748 marks = []
749 749 for bookmark, n in self._bookmarks.iteritems():
750 750 if n == node:
751 751 marks.append(bookmark)
752 752 return sorted(marks)
753 753
754 754 def branchmap(self):
755 755 '''returns a dictionary {branch: [branchheads]} with branchheads
756 756 ordered by increasing revision number'''
757 757 branchmap.updatecache(self)
758 758 return self._branchcaches[self.filtername]
759 759
760 760 @unfilteredmethod
761 761 def revbranchcache(self):
762 762 if not self._revbranchcache:
763 763 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
764 764 return self._revbranchcache
765 765
766 766 def branchtip(self, branch, ignoremissing=False):
767 767 '''return the tip node for a given branch
768 768
769 769 If ignoremissing is True, then this method will not raise an error.
770 770 This is helpful for callers that only expect None for a missing branch
771 771 (e.g. namespace).
772 772
773 773 '''
774 774 try:
775 775 return self.branchmap().branchtip(branch)
776 776 except KeyError:
777 777 if not ignoremissing:
778 778 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
779 779 else:
780 780 pass
781 781
782 782 def lookup(self, key):
783 783 return self[key].node()
784 784
785 785 def lookupbranch(self, key, remote=None):
786 786 repo = remote or self
787 787 if key in repo.branchmap():
788 788 return key
789 789
790 790 repo = (remote and remote.local()) and remote or self
791 791 return repo[key].branch()
792 792
793 793 def known(self, nodes):
794 794 cl = self.changelog
795 795 nm = cl.nodemap
796 796 filtered = cl.filteredrevs
797 797 result = []
798 798 for n in nodes:
799 799 r = nm.get(n)
800 800 resp = not (r is None or r in filtered)
801 801 result.append(resp)
802 802 return result
803 803
804 804 def local(self):
805 805 return self
806 806
807 807 def publishing(self):
808 808 # it's safe (and desirable) to trust the publish flag unconditionally
809 809 # so that we don't finalize changes shared between users via ssh or nfs
810 810 return self.ui.configbool('phases', 'publish', True, untrusted=True)
811 811
812 812 def cancopy(self):
813 813 # so statichttprepo's override of local() works
814 814 if not self.local():
815 815 return False
816 816 if not self.publishing():
817 817 return True
818 818 # if publishing we can't copy if there is filtered content
819 819 return not self.filtered('visible').changelog.filteredrevs
820 820
821 821 def shared(self):
822 822 '''the type of shared repository (None if not shared)'''
823 823 if self.sharedpath != self.path:
824 824 return 'store'
825 825 return None
826 826
827 827 def wjoin(self, f, *insidef):
828 828 return self.vfs.reljoin(self.root, f, *insidef)
829 829
830 830 def file(self, f):
831 831 if f[0] == '/':
832 832 f = f[1:]
833 833 return filelog.filelog(self.svfs, f)
834 834
835 835 def changectx(self, changeid):
836 836 return self[changeid]
837 837
838 838 def setparents(self, p1, p2=nullid):
839 839 self.dirstate.beginparentchange()
840 840 copies = self.dirstate.setparents(p1, p2)
841 841 pctx = self[p1]
842 842 if copies:
843 843 # Adjust copy records; the dirstate cannot do it, since it
844 844 # requires access to the parent manifests. Preserve them
845 845 # only for entries added to the first parent.
846 846 for f in copies:
847 847 if f not in pctx and copies[f] in pctx:
848 848 self.dirstate.copy(copies[f], f)
849 849 if p2 == nullid:
850 850 for f, s in sorted(self.dirstate.copies().items()):
851 851 if f not in pctx and s not in pctx:
852 852 self.dirstate.copy(None, f)
853 853 self.dirstate.endparentchange()
854 854
855 855 def filectx(self, path, changeid=None, fileid=None):
856 856 """changeid can be a changeset revision, node, or tag.
857 857 fileid can be a file revision or node."""
858 858 return context.filectx(self, path, changeid, fileid)
859 859
860 860 def getcwd(self):
861 861 return self.dirstate.getcwd()
862 862
863 863 def pathto(self, f, cwd=None):
864 864 return self.dirstate.pathto(f, cwd)
865 865
866 866 def _loadfilter(self, filter):
867 867 if filter not in self.filterpats:
868 868 l = []
869 869 for pat, cmd in self.ui.configitems(filter):
870 870 if cmd == '!':
871 871 continue
872 872 mf = matchmod.match(self.root, '', [pat])
873 873 fn = None
874 874 params = cmd
875 875 for name, filterfn in self._datafilters.iteritems():
876 876 if cmd.startswith(name):
877 877 fn = filterfn
878 878 params = cmd[len(name):].lstrip()
879 879 break
880 880 if not fn:
881 881 fn = lambda s, c, **kwargs: util.filter(s, c)
882 882 # Wrap old filters not supporting keyword arguments
883 883 if not inspect.getargspec(fn)[2]:
884 884 oldfn = fn
885 885 fn = lambda s, c, **kwargs: oldfn(s, c)
886 886 l.append((mf, fn, params))
887 887 self.filterpats[filter] = l
888 888 return self.filterpats[filter]
889 889
890 890 def _filter(self, filterpats, filename, data):
891 891 for mf, fn, cmd in filterpats:
892 892 if mf(filename):
893 893 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
894 894 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
895 895 break
896 896
897 897 return data
898 898
899 899 @unfilteredpropertycache
900 900 def _encodefilterpats(self):
901 901 return self._loadfilter('encode')
902 902
903 903 @unfilteredpropertycache
904 904 def _decodefilterpats(self):
905 905 return self._loadfilter('decode')
906 906
907 907 def adddatafilter(self, name, filter):
908 908 self._datafilters[name] = filter
909 909
910 910 def wread(self, filename):
911 911 if self.wvfs.islink(filename):
912 912 data = self.wvfs.readlink(filename)
913 913 else:
914 914 data = self.wvfs.read(filename)
915 915 return self._filter(self._encodefilterpats, filename, data)
916 916
917 917 def wwrite(self, filename, data, flags, backgroundclose=False):
918 918 """write ``data`` into ``filename`` in the working directory
919 919
920 920 This returns length of written (maybe decoded) data.
921 921 """
922 922 data = self._filter(self._decodefilterpats, filename, data)
923 923 if 'l' in flags:
924 924 self.wvfs.symlink(data, filename)
925 925 else:
926 926 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
927 927 if 'x' in flags:
928 928 self.wvfs.setflags(filename, False, True)
929 929 return len(data)
930 930
931 931 def wwritedata(self, filename, data):
932 932 return self._filter(self._decodefilterpats, filename, data)
933 933
934 934 def currenttransaction(self):
935 935 """return the current transaction or None if non exists"""
936 936 if self._transref:
937 937 tr = self._transref()
938 938 else:
939 939 tr = None
940 940
941 941 if tr and tr.running():
942 942 return tr
943 943 return None
944 944
945 945 def transaction(self, desc, report=None):
946 946 if (self.ui.configbool('devel', 'all-warnings')
947 947 or self.ui.configbool('devel', 'check-locks')):
948 948 if self._currentlock(self._lockref) is None:
949 949 raise error.ProgrammingError('transaction requires locking')
950 950 tr = self.currenttransaction()
951 951 if tr is not None:
952 952 return tr.nest()
953 953
954 954 # abort here if the journal already exists
955 955 if self.svfs.exists("journal"):
956 956 raise error.RepoError(
957 957 _("abandoned transaction found"),
958 958 hint=_("run 'hg recover' to clean up transaction"))
959 959
960 960 idbase = "%.40f#%f" % (random.random(), time.time())
961 961 ha = hex(hashlib.sha1(idbase).digest())
962 962 txnid = 'TXN:' + ha
963 963 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
964 964
965 965 self._writejournal(desc)
966 966 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
967 967 if report:
968 968 rp = report
969 969 else:
970 970 rp = self.ui.warn
971 971 vfsmap = {'plain': self.vfs} # root of .hg/
972 972 # we must avoid cyclic reference between repo and transaction.
973 973 reporef = weakref.ref(self)
974 974 # Code to track tag movement
975 975 #
976 976 # Since tags are all handled as file content, it is actually quite hard
977 977 # to track their movement from a code perspective. So we fall back to
978 978 # tracking at the repository level. One could envision tracking changes
979 979 # to the '.hgtags' file through changegroup apply, but that fails to
980 980 # cope with cases where a transaction exposes new heads without a
981 981 # changegroup being involved (eg: phase movement).
982 982 #
983 983 # For now, we gate the feature behind a flag since this likely comes
984 984 # with performance impacts. The current code runs more often than needed
985 985 # and does not use caches as much as it could. The current focus is on
986 986 # the behavior of the feature so we disable it by default. The flag
987 987 # will be removed when we are happy with the performance impact.
988 988 #
989 989 # Once this feature is no longer experimental move the following
990 990 # documentation to the appropriate help section:
991 991 #
992 992 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
993 993 # tags (new or changed or deleted tags). In addition the details of
994 994 # these changes are made available in a file at:
995 995 # ``REPOROOT/.hg/changes/tags.changes``.
996 996 # Make sure you check for HG_TAG_MOVED before reading that file as it
997 997 # might exist from a previous transaction even if no tags were touched
998 998 # in this one. Changes are recorded in a line-based format::
999 999 #
1000 1000 # <action> <hex-node> <tag-name>\n
1001 1001 #
1002 1002 # Actions are defined as follows:
1003 1003 # "-R": tag is removed,
1004 1004 # "+A": tag is added,
1005 1005 # "-M": tag is moved (old value),
1006 1006 # "+M": tag is moved (new value),
1007 1007 tracktags = lambda x: None
1008 1008 # experimental config: experimental.hook-track-tags
1009 1009 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
1010 1010 False)
1011 1011 if desc != 'strip' and shouldtracktags:
1012 1012 oldheads = self.changelog.headrevs()
1013 1013 def tracktags(tr2):
1014 1014 repo = reporef()
1015 1015 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1016 1016 newheads = repo.changelog.headrevs()
1017 1017 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1018 1018 # note: we compare lists here.
1019 1019 # As we do it only once, building a set would not be cheaper
1020 1020 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1021 1021 if changes:
1022 1022 tr2.hookargs['tag_moved'] = '1'
1023 1023 with repo.vfs('changes/tags.changes', 'w',
1024 1024 atomictemp=True) as changesfile:
1025 1025 # note: we do not register the file with the transaction
1026 1026 # because we need it to still exist when the transaction
1027 1027 # is closed (for txnclose hooks)
1028 1028 tagsmod.writediff(changesfile, changes)
1029 1029 def validate(tr2):
1030 1030 """will run pre-closing hooks"""
1031 1031 # XXX the transaction API is a bit lacking here so we take a hacky
1032 1032 # path for now
1033 1033 #
1034 1034 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1035 1035 # dict is copied before these run. In addition we need the data
1036 1036 # available to in-memory hooks too.
1037 1037 #
1038 1038 # Moreover, we also need to make sure this runs before txnclose
1039 1039 # hooks and there is no "pending" mechanism that would execute
1040 1040 # logic only if hooks are about to run.
1041 1041 #
1042 1042 # Fixing this limitation of the transaction is also needed to track
1043 1043 # other families of changes (bookmarks, phases, obsolescence).
1044 1044 #
1045 1045 # This will have to be fixed before we remove the experimental
1046 1046 # gating.
1047 1047 tracktags(tr2)
1048 1048 reporef().hook('pretxnclose', throw=True,
1049 1049 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1050 1050 def releasefn(tr, success):
1051 1051 repo = reporef()
1052 1052 if success:
1053 1053 # this should be explicitly invoked here, because
1054 1054 # in-memory changes aren't written out when closing
1055 1055 # the transaction, if tr.addfilegenerator (via
1056 1056 # dirstate.write or so) isn't invoked while the
1057 1057 # transaction is running
1058 1058 repo.dirstate.write(None)
1059 1059 else:
1060 1060 # discard all changes (including ones already written
1061 1061 # out) in this transaction
1062 1062 repo.dirstate.restorebackup(None, prefix='journal.')
1063 1063
1064 1064 repo.invalidate(clearfilecache=True)
1065 1065
1066 1066 tr = transaction.transaction(rp, self.svfs, vfsmap,
1067 1067 "journal",
1068 1068 "undo",
1069 1069 aftertrans(renames),
1070 1070 self.store.createmode,
1071 1071 validator=validate,
1072 1072 releasefn=releasefn)
1073 1073 tr.changes['revs'] = set()
1074 1074
1075 1075 tr.hookargs['txnid'] = txnid
1076 1076 # note: writing the fncache only during finalize means that the file is
1077 1077 # outdated when running hooks. As fncache is used for streaming clone,
1078 1078 # this is not expected to break anything that happens during the hooks.
1079 1079 tr.addfinalize('flush-fncache', self.store.write)
1080 1080 def txnclosehook(tr2):
1081 1081 """To be run if transaction is successful, will schedule a hook run
1082 1082 """
1083 1083 # Don't reference tr2 in hook() so we don't hold a reference.
1084 1084 # This reduces memory consumption when there are multiple
1085 1085 # transactions per lock. This can likely go away if issue5045
1086 1086 # fixes the function accumulation.
1087 1087 hookargs = tr2.hookargs
1088 1088
1089 1089 def hook():
1090 1090 reporef().hook('txnclose', throw=False, txnname=desc,
1091 1091 **pycompat.strkwargs(hookargs))
1092 1092 reporef()._afterlock(hook)
1093 1093 tr.addfinalize('txnclose-hook', txnclosehook)
1094 1094 def warmscache(tr2):
1095 1095 repo = reporef()
1096 1096 repo.updatecaches(tr2)
1097 1097 tr.addpostclose('warms-cache', warmscache)
1098 1098 def txnaborthook(tr2):
1099 1099 """To be run if transaction is aborted
1100 1100 """
1101 1101 reporef().hook('txnabort', throw=False, txnname=desc,
1102 1102 **tr2.hookargs)
1103 1103 tr.addabort('txnabort-hook', txnaborthook)
1104 1104 # avoid eager cache invalidation. in-memory data should be identical
1105 1105 # to stored data if transaction has no error.
1106 1106 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1107 1107 self._transref = weakref.ref(tr)
1108 1108 return tr
1109 1109
1110 1110 def _journalfiles(self):
1111 1111 return ((self.svfs, 'journal'),
1112 1112 (self.vfs, 'journal.dirstate'),
1113 1113 (self.vfs, 'journal.branch'),
1114 1114 (self.vfs, 'journal.desc'),
1115 1115 (self.vfs, 'journal.bookmarks'),
1116 1116 (self.svfs, 'journal.phaseroots'))
1117 1117
1118 1118 def undofiles(self):
1119 1119 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1120 1120
1121 1121 def _writejournal(self, desc):
1122 1122 self.dirstate.savebackup(None, prefix='journal.')
1123 1123 self.vfs.write("journal.branch",
1124 1124 encoding.fromlocal(self.dirstate.branch()))
1125 1125 self.vfs.write("journal.desc",
1126 1126 "%d\n%s\n" % (len(self), desc))
1127 1127 self.vfs.write("journal.bookmarks",
1128 1128 self.vfs.tryread("bookmarks"))
1129 1129 self.svfs.write("journal.phaseroots",
1130 1130 self.svfs.tryread("phaseroots"))
1131 1131
1132 1132 def recover(self):
1133 1133 with self.lock():
1134 1134 if self.svfs.exists("journal"):
1135 1135 self.ui.status(_("rolling back interrupted transaction\n"))
1136 1136 vfsmap = {'': self.svfs,
1137 1137 'plain': self.vfs,}
1138 1138 transaction.rollback(self.svfs, vfsmap, "journal",
1139 1139 self.ui.warn)
1140 1140 self.invalidate()
1141 1141 return True
1142 1142 else:
1143 1143 self.ui.warn(_("no interrupted transaction available\n"))
1144 1144 return False
1145 1145
1146 1146 def rollback(self, dryrun=False, force=False):
1147 1147 wlock = lock = dsguard = None
1148 1148 try:
1149 1149 wlock = self.wlock()
1150 1150 lock = self.lock()
1151 1151 if self.svfs.exists("undo"):
1152 1152 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1153 1153
1154 1154 return self._rollback(dryrun, force, dsguard)
1155 1155 else:
1156 1156 self.ui.warn(_("no rollback information available\n"))
1157 1157 return 1
1158 1158 finally:
1159 1159 release(dsguard, lock, wlock)
1160 1160
1161 1161 @unfilteredmethod # Until we get smarter cache management
1162 1162 def _rollback(self, dryrun, force, dsguard):
1163 1163 ui = self.ui
1164 1164 try:
1165 1165 args = self.vfs.read('undo.desc').splitlines()
1166 1166 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1167 1167 if len(args) >= 3:
1168 1168 detail = args[2]
1169 1169 oldtip = oldlen - 1
1170 1170
1171 1171 if detail and ui.verbose:
1172 1172 msg = (_('repository tip rolled back to revision %s'
1173 1173 ' (undo %s: %s)\n')
1174 1174 % (oldtip, desc, detail))
1175 1175 else:
1176 1176 msg = (_('repository tip rolled back to revision %s'
1177 1177 ' (undo %s)\n')
1178 1178 % (oldtip, desc))
1179 1179 except IOError:
1180 1180 msg = _('rolling back unknown transaction\n')
1181 1181 desc = None
1182 1182
1183 1183 if not force and self['.'] != self['tip'] and desc == 'commit':
1184 1184 raise error.Abort(
1185 1185 _('rollback of last commit while not checked out '
1186 1186 'may lose data'), hint=_('use -f to force'))
1187 1187
1188 1188 ui.status(msg)
1189 1189 if dryrun:
1190 1190 return 0
1191 1191
1192 1192 parents = self.dirstate.parents()
1193 1193 self.destroying()
1194 1194 vfsmap = {'plain': self.vfs, '': self.svfs}
1195 1195 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1196 1196 if self.vfs.exists('undo.bookmarks'):
1197 1197 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1198 1198 if self.svfs.exists('undo.phaseroots'):
1199 1199 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1200 1200 self.invalidate()
1201 1201
1202 1202 parentgone = (parents[0] not in self.changelog.nodemap or
1203 1203 parents[1] not in self.changelog.nodemap)
1204 1204 if parentgone:
1205 1205 # prevent dirstateguard from overwriting already restored one
1206 1206 dsguard.close()
1207 1207
1208 1208 self.dirstate.restorebackup(None, prefix='undo.')
1209 1209 try:
1210 1210 branch = self.vfs.read('undo.branch')
1211 1211 self.dirstate.setbranch(encoding.tolocal(branch))
1212 1212 except IOError:
1213 1213 ui.warn(_('named branch could not be reset: '
1214 1214 'current branch is still \'%s\'\n')
1215 1215 % self.dirstate.branch())
1216 1216
1217 1217 parents = tuple([p.rev() for p in self[None].parents()])
1218 1218 if len(parents) > 1:
1219 1219 ui.status(_('working directory now based on '
1220 1220 'revisions %d and %d\n') % parents)
1221 1221 else:
1222 1222 ui.status(_('working directory now based on '
1223 1223 'revision %d\n') % parents)
1224 1224 mergemod.mergestate.clean(self, self['.'].node())
1225 1225
1226 1226 # TODO: if we know which new heads may result from this rollback, pass
1227 1227 # them to destroy(), which will prevent the branchhead cache from being
1228 1228 # invalidated.
1229 1229 self.destroyed()
1230 1230 return 0
1231 1231
1232 1232 @unfilteredmethod
1233 1233 def updatecaches(self, tr=None):
1234 1234 """warm appropriate caches
1235 1235
1236 1236 If this function is called after a transaction closed, the transaction
1237 1237 will be available in the 'tr' argument. This can be used to selectively
1238 1238 update caches relevant to the changes in that transaction.
1239 1239 """
1240 1240 if tr is not None and tr.hookargs.get('source') == 'strip':
1241 1241 # During strip, many caches are invalid but
1242 1242 # later call to `destroyed` will refresh them.
1243 1243 return
1244 1244
1245 1245 if tr is None or tr.changes['revs']:
1246 1246 # updating the unfiltered branchmap should refresh all the others,
1247 1247 self.ui.debug('updating the branch cache\n')
1248 1248 branchmap.updatecache(self.filtered('served'))
1249 1249
1250 1250 def invalidatecaches(self):
1251 1251
1252 1252 if '_tagscache' in vars(self):
1253 1253 # can't use delattr on proxy
1254 1254 del self.__dict__['_tagscache']
1255 1255
1256 1256 self.unfiltered()._branchcaches.clear()
1257 1257 self.invalidatevolatilesets()
1258 1258
1259 1259 def invalidatevolatilesets(self):
1260 1260 self.filteredrevcache.clear()
1261 1261 obsolete.clearobscaches(self)
1262 1262
1263 1263 def invalidatedirstate(self):
1264 1264 '''Invalidates the dirstate, causing the next call to dirstate
1265 1265 to check if it was modified since the last time it was read,
1266 1266 rereading it if it has.
1267 1267
1268 1268 This is different from dirstate.invalidate() in that it doesn't always
1269 1269 reread the dirstate. Use dirstate.invalidate() if you want to
1270 1270 explicitly read the dirstate again (i.e. restoring it to a previous
1271 1271 known good state).'''
1272 1272 if hasunfilteredcache(self, 'dirstate'):
1273 1273 for k in self.dirstate._filecache:
1274 1274 try:
1275 1275 delattr(self.dirstate, k)
1276 1276 except AttributeError:
1277 1277 pass
1278 1278 delattr(self.unfiltered(), 'dirstate')
1279 1279
1280 1280 def invalidate(self, clearfilecache=False):
1281 1281 '''Invalidates both store and non-store parts other than dirstate
1282 1282
1283 1283 If a transaction is running, invalidation of store is omitted,
1284 1284 because discarding in-memory changes might cause inconsistency
1285 1285 (e.g. incomplete fncache causes unintentional failure, but
1286 1286 redundant one doesn't).
1287 1287 '''
1288 1288 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1289 1289 for k in list(self._filecache.keys()):
1290 1290 # dirstate is invalidated separately in invalidatedirstate()
1291 1291 if k == 'dirstate':
1292 1292 continue
1293 1293
1294 1294 if clearfilecache:
1295 1295 del self._filecache[k]
1296 1296 try:
1297 1297 delattr(unfiltered, k)
1298 1298 except AttributeError:
1299 1299 pass
1300 1300 self.invalidatecaches()
1301 1301 if not self.currenttransaction():
1302 1302 # TODO: Changing contents of store outside transaction
1303 1303 # causes inconsistency. We should make in-memory store
1304 1304 # changes detectable, and abort if changed.
1305 1305 self.store.invalidatecaches()
1306 1306
1307 1307 def invalidateall(self):
1308 1308 '''Fully invalidates both store and non-store parts, causing the
1309 1309 subsequent operation to reread any outside changes.'''
1310 1310 # extension should hook this to invalidate its caches
1311 1311 self.invalidate()
1312 1312 self.invalidatedirstate()
1313 1313
1314 1314 @unfilteredmethod
1315 1315 def _refreshfilecachestats(self, tr):
1316 1316 """Reload stats of cached files so that they are flagged as valid"""
1317 1317 for k, ce in self._filecache.items():
1318 1318 if k == 'dirstate' or k not in self.__dict__:
1319 1319 continue
1320 1320 ce.refresh()
1321 1321
1322 1322 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1323 1323 inheritchecker=None, parentenvvar=None):
1324 1324 parentlock = None
1325 1325 # the contents of parentenvvar are used by the underlying lock to
1326 1326 # determine whether it can be inherited
1327 1327 if parentenvvar is not None:
1328 1328 parentlock = encoding.environ.get(parentenvvar)
1329 1329 try:
1330 1330 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1331 1331 acquirefn=acquirefn, desc=desc,
1332 1332 inheritchecker=inheritchecker,
1333 1333 parentlock=parentlock)
1334 1334 except error.LockHeld as inst:
1335 1335 if not wait:
1336 1336 raise
1337 1337 # show more details for new-style locks
1338 1338 if ':' in inst.locker:
1339 1339 host, pid = inst.locker.split(":", 1)
1340 1340 self.ui.warn(
1341 1341 _("waiting for lock on %s held by process %r "
1342 1342 "on host %r\n") % (desc, pid, host))
1343 1343 else:
1344 1344 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1345 1345 (desc, inst.locker))
1346 1346 # default to 600 seconds timeout
1347 1347 l = lockmod.lock(vfs, lockname,
1348 1348 int(self.ui.config("ui", "timeout", "600")),
1349 1349 releasefn=releasefn, acquirefn=acquirefn,
1350 1350 desc=desc)
1351 1351 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1352 1352 return l
1353 1353
1354 1354 def _afterlock(self, callback):
1355 1355 """add a callback to be run when the repository is fully unlocked
1356 1356
1357 1357 The callback will be executed when the outermost lock is released
1358 1358 (with wlock being higher level than 'lock')."""
1359 1359 for ref in (self._wlockref, self._lockref):
1360 1360 l = ref and ref()
1361 1361 if l and l.held:
1362 1362 l.postrelease.append(callback)
1363 1363 break
1364 1364 else: # no lock has been found.
1365 1365 callback()
1366 1366
1367 1367 def lock(self, wait=True):
1368 1368 '''Lock the repository store (.hg/store) and return a weak reference
1369 1369 to the lock. Use this before modifying the store (e.g. committing or
1370 1370 stripping). If you are opening a transaction, get a lock as well.
1371 1371
1372 1372 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1373 1373 'wlock' first to avoid a dead-lock hazard.'''
1374 1374 l = self._currentlock(self._lockref)
1375 1375 if l is not None:
1376 1376 l.lock()
1377 1377 return l
1378 1378
1379 1379 l = self._lock(self.svfs, "lock", wait, None,
1380 1380 self.invalidate, _('repository %s') % self.origroot)
1381 1381 self._lockref = weakref.ref(l)
1382 1382 return l
1383 1383
1384 1384 def _wlockchecktransaction(self):
1385 1385 if self.currenttransaction() is not None:
1386 1386 raise error.LockInheritanceContractViolation(
1387 1387 'wlock cannot be inherited in the middle of a transaction')
1388 1388
1389 1389 def wlock(self, wait=True):
1390 1390 '''Lock the non-store parts of the repository (everything under
1391 1391 .hg except .hg/store) and return a weak reference to the lock.
1392 1392
1393 1393 Use this before modifying files in .hg.
1394 1394
1395 1395 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1396 1396 'wlock' first to avoid a dead-lock hazard.'''
1397 1397 l = self._wlockref and self._wlockref()
1398 1398 if l is not None and l.held:
1399 1399 l.lock()
1400 1400 return l
1401 1401
1402 1402 # We do not need to check for non-waiting lock acquisition. Such
1403 1403 # acquisition would not cause dead-lock as they would just fail.
1404 1404 if wait and (self.ui.configbool('devel', 'all-warnings')
1405 1405 or self.ui.configbool('devel', 'check-locks')):
1406 1406 if self._currentlock(self._lockref) is not None:
1407 1407 self.ui.develwarn('"wlock" acquired after "lock"')
1408 1408
1409 1409 def unlock():
1410 1410 if self.dirstate.pendingparentchange():
1411 1411 self.dirstate.invalidate()
1412 1412 else:
1413 1413 self.dirstate.write(None)
1414 1414
1415 1415 self._filecache['dirstate'].refresh()
1416 1416
1417 1417 l = self._lock(self.vfs, "wlock", wait, unlock,
1418 1418 self.invalidatedirstate, _('working directory of %s') %
1419 1419 self.origroot,
1420 1420 inheritchecker=self._wlockchecktransaction,
1421 1421 parentenvvar='HG_WLOCK_LOCKER')
1422 1422 self._wlockref = weakref.ref(l)
1423 1423 return l
1424 1424
1425 1425 def _currentlock(self, lockref):
1426 1426 """Returns the lock if it's held, or None if it's not."""
1427 1427 if lockref is None:
1428 1428 return None
1429 1429 l = lockref()
1430 1430 if l is None or not l.held:
1431 1431 return None
1432 1432 return l
1433 1433
1434 1434 def currentwlock(self):
1435 1435 """Returns the wlock if it's held, or None if it's not."""
1436 1436 return self._currentlock(self._wlockref)
1437 1437
1438 1438 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1439 1439 """
1440 1440 commit an individual file as part of a larger transaction
1441 1441 """
1442 1442
1443 1443 fname = fctx.path()
1444 1444 fparent1 = manifest1.get(fname, nullid)
1445 1445 fparent2 = manifest2.get(fname, nullid)
1446 1446 if isinstance(fctx, context.filectx):
1447 1447 node = fctx.filenode()
1448 1448 if node in [fparent1, fparent2]:
1449 1449 self.ui.debug('reusing %s filelog entry\n' % fname)
1450 1450 if manifest1.flags(fname) != fctx.flags():
1451 1451 changelist.append(fname)
1452 1452 return node
1453 1453
1454 1454 flog = self.file(fname)
1455 1455 meta = {}
1456 1456 copy = fctx.renamed()
1457 1457 if copy and copy[0] != fname:
1458 1458 # Mark the new revision of this file as a copy of another
1459 1459 # file. This copy data will effectively act as a parent
1460 1460 # of this new revision. If this is a merge, the first
1461 1461 # parent will be the nullid (meaning "look up the copy data")
1462 1462 # and the second one will be the other parent. For example:
1463 1463 #
1464 1464 # 0 --- 1 --- 3 rev1 changes file foo
1465 1465 # \ / rev2 renames foo to bar and changes it
1466 1466 # \- 2 -/ rev3 should have bar with all changes and
1467 1467 # should record that bar descends from
1468 1468 # bar in rev2 and foo in rev1
1469 1469 #
1470 1470 # this allows this merge to succeed:
1471 1471 #
1472 1472 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1473 1473 # \ / merging rev3 and rev4 should use bar@rev2
1474 1474 # \- 2 --- 4 as the merge base
1475 1475 #
1476 1476
1477 1477 cfname = copy[0]
1478 1478 crev = manifest1.get(cfname)
1479 1479 newfparent = fparent2
1480 1480
1481 1481 if manifest2: # branch merge
1482 1482 if fparent2 == nullid or crev is None: # copied on remote side
1483 1483 if cfname in manifest2:
1484 1484 crev = manifest2[cfname]
1485 1485 newfparent = fparent1
1486 1486
1487 1487 # Here, we used to search backwards through history to try to find
1488 1488 # where the file copy came from if the source of a copy was not in
1489 1489 # the parent directory. However, this doesn't actually make sense to
1490 1490 # do (what does a copy from something not in your working copy even
1491 1491 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1492 1492 # the user that copy information was dropped, so if they didn't
1493 1493 # expect this outcome it can be fixed, but this is the correct
1494 1494 # behavior in this circumstance.
1495 1495
1496 1496 if crev:
1497 1497 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1498 1498 meta["copy"] = cfname
1499 1499 meta["copyrev"] = hex(crev)
1500 1500 fparent1, fparent2 = nullid, newfparent
1501 1501 else:
1502 1502 self.ui.warn(_("warning: can't find ancestor for '%s' "
1503 1503 "copied from '%s'!\n") % (fname, cfname))
1504 1504
1505 1505 elif fparent1 == nullid:
1506 1506 fparent1, fparent2 = fparent2, nullid
1507 1507 elif fparent2 != nullid:
1508 1508 # is one parent an ancestor of the other?
1509 1509 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1510 1510 if fparent1 in fparentancestors:
1511 1511 fparent1, fparent2 = fparent2, nullid
1512 1512 elif fparent2 in fparentancestors:
1513 1513 fparent2 = nullid
1514 1514
1515 1515 # is the file changed?
1516 1516 text = fctx.data()
1517 1517 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1518 1518 changelist.append(fname)
1519 1519 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1520 1520 # are just the flags changed during merge?
1521 1521 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1522 1522 changelist.append(fname)
1523 1523
1524 1524 return fparent1
1525 1525
1526 1526 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1527 1527 """check for commit arguments that aren't committable"""
1528 1528 if match.isexact() or match.prefix():
1529 1529 matched = set(status.modified + status.added + status.removed)
1530 1530
1531 1531 for f in match.files():
1532 1532 f = self.dirstate.normalize(f)
1533 1533 if f == '.' or f in matched or f in wctx.substate:
1534 1534 continue
1535 1535 if f in status.deleted:
1536 1536 fail(f, _('file not found!'))
1537 1537 if f in vdirs: # visited directory
1538 1538 d = f + '/'
1539 1539 for mf in matched:
1540 1540 if mf.startswith(d):
1541 1541 break
1542 1542 else:
1543 1543 fail(f, _("no match under directory!"))
1544 1544 elif f not in self.dirstate:
1545 1545 fail(f, _("file not tracked!"))
1546 1546
1547 1547 @unfilteredmethod
1548 1548 def commit(self, text="", user=None, date=None, match=None, force=False,
1549 1549 editor=False, extra=None):
1550 1550 """Add a new revision to current repository.
1551 1551
1552 1552 Revision information is gathered from the working directory,
1553 1553 match can be used to filter the committed files. If editor is
1554 1554 supplied, it is called to get a commit message.
1555 1555 """
1556 1556 if extra is None:
1557 1557 extra = {}
1558 1558
1559 1559 def fail(f, msg):
1560 1560 raise error.Abort('%s: %s' % (f, msg))
1561 1561
1562 1562 if not match:
1563 1563 match = matchmod.always(self.root, '')
1564 1564
1565 1565 if not force:
1566 1566 vdirs = []
1567 1567 match.explicitdir = vdirs.append
1568 1568 match.bad = fail
1569 1569
1570 1570 wlock = lock = tr = None
1571 1571 try:
1572 1572 wlock = self.wlock()
1573 1573 lock = self.lock() # for recent changelog (see issue4368)
1574 1574
1575 1575 wctx = self[None]
1576 1576 merge = len(wctx.parents()) > 1
1577 1577
1578 if not force and merge and match.ispartial():
1578 if not force and merge and not match.always():
1579 1579 raise error.Abort(_('cannot partially commit a merge '
1580 1580 '(do not specify files or patterns)'))
1581 1581
1582 1582 status = self.status(match=match, clean=force)
1583 1583 if force:
1584 1584 status.modified.extend(status.clean) # mq may commit clean files
1585 1585
1586 1586 # check subrepos
1587 1587 subs = []
1588 1588 commitsubs = set()
1589 1589 newstate = wctx.substate.copy()
1590 1590 # only manage subrepos and .hgsubstate if .hgsub is present
1591 1591 if '.hgsub' in wctx:
1592 1592 # we'll decide whether to track this ourselves, thanks
1593 1593 for c in status.modified, status.added, status.removed:
1594 1594 if '.hgsubstate' in c:
1595 1595 c.remove('.hgsubstate')
1596 1596
1597 1597 # compare current state to last committed state
1598 1598 # build new substate based on last committed state
1599 1599 oldstate = wctx.p1().substate
1600 1600 for s in sorted(newstate.keys()):
1601 1601 if not match(s):
1602 1602 # ignore working copy, use old state if present
1603 1603 if s in oldstate:
1604 1604 newstate[s] = oldstate[s]
1605 1605 continue
1606 1606 if not force:
1607 1607 raise error.Abort(
1608 1608 _("commit with new subrepo %s excluded") % s)
1609 1609 dirtyreason = wctx.sub(s).dirtyreason(True)
1610 1610 if dirtyreason:
1611 1611 if not self.ui.configbool('ui', 'commitsubrepos'):
1612 1612 raise error.Abort(dirtyreason,
1613 1613 hint=_("use --subrepos for recursive commit"))
1614 1614 subs.append(s)
1615 1615 commitsubs.add(s)
1616 1616 else:
1617 1617 bs = wctx.sub(s).basestate()
1618 1618 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1619 1619 if oldstate.get(s, (None, None, None))[1] != bs:
1620 1620 subs.append(s)
1621 1621
1622 1622 # check for removed subrepos
1623 1623 for p in wctx.parents():
1624 1624 r = [s for s in p.substate if s not in newstate]
1625 1625 subs += [s for s in r if match(s)]
1626 1626 if subs:
1627 1627 if (not match('.hgsub') and
1628 1628 '.hgsub' in (wctx.modified() + wctx.added())):
1629 1629 raise error.Abort(
1630 1630 _("can't commit subrepos without .hgsub"))
1631 1631 status.modified.insert(0, '.hgsubstate')
1632 1632
1633 1633 elif '.hgsub' in status.removed:
1634 1634 # clean up .hgsubstate when .hgsub is removed
1635 1635 if ('.hgsubstate' in wctx and
1636 1636 '.hgsubstate' not in (status.modified + status.added +
1637 1637 status.removed)):
1638 1638 status.removed.insert(0, '.hgsubstate')
1639 1639
1640 1640 # make sure all explicit patterns are matched
1641 1641 if not force:
1642 1642 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1643 1643
1644 1644 cctx = context.workingcommitctx(self, status,
1645 1645 text, user, date, extra)
1646 1646
1647 1647 # internal config: ui.allowemptycommit
1648 1648 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1649 1649 or extra.get('close') or merge or cctx.files()
1650 1650 or self.ui.configbool('ui', 'allowemptycommit'))
1651 1651 if not allowemptycommit:
1652 1652 return None
1653 1653
1654 1654 if merge and cctx.deleted():
1655 1655 raise error.Abort(_("cannot commit merge with missing files"))
1656 1656
1657 1657 ms = mergemod.mergestate.read(self)
1658 1658 mergeutil.checkunresolved(ms)
1659 1659
1660 1660 if editor:
1661 1661 cctx._text = editor(self, cctx, subs)
1662 1662 edited = (text != cctx._text)
1663 1663
1664 1664 # Save commit message in case this transaction gets rolled back
1665 1665 # (e.g. by a pretxncommit hook). Leave the content alone on
1666 1666 # the assumption that the user will use the same editor again.
1667 1667 msgfn = self.savecommitmessage(cctx._text)
1668 1668
1669 1669 # commit subs and write new state
1670 1670 if subs:
1671 1671 for s in sorted(commitsubs):
1672 1672 sub = wctx.sub(s)
1673 1673 self.ui.status(_('committing subrepository %s\n') %
1674 1674 subrepo.subrelpath(sub))
1675 1675 sr = sub.commit(cctx._text, user, date)
1676 1676 newstate[s] = (newstate[s][0], sr)
1677 1677 subrepo.writestate(self, newstate)
1678 1678
1679 1679 p1, p2 = self.dirstate.parents()
1680 1680 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1681 1681 try:
1682 1682 self.hook("precommit", throw=True, parent1=hookp1,
1683 1683 parent2=hookp2)
1684 1684 tr = self.transaction('commit')
1685 1685 ret = self.commitctx(cctx, True)
1686 1686 except: # re-raises
1687 1687 if edited:
1688 1688 self.ui.write(
1689 1689 _('note: commit message saved in %s\n') % msgfn)
1690 1690 raise
1691 1691 # update bookmarks, dirstate and mergestate
1692 1692 bookmarks.update(self, [p1, p2], ret)
1693 1693 cctx.markcommitted(ret)
1694 1694 ms.reset()
1695 1695 tr.close()
1696 1696
1697 1697 finally:
1698 1698 lockmod.release(tr, lock, wlock)
1699 1699
1700 1700 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1701 1701 # hack for commands that use a temporary commit (e.g. histedit):
1702 1702 # the temporary commit may already have been stripped before the hook runs
1703 1703 if self.changelog.hasnode(ret):
1704 1704 self.hook("commit", node=node, parent1=parent1,
1705 1705 parent2=parent2)
1706 1706 self._afterlock(commithook)
1707 1707 return ret
1708 1708
1709 1709 @unfilteredmethod
1710 1710 def commitctx(self, ctx, error=False):
1711 1711 """Add a new revision to current repository.
1712 1712 Revision information is passed via the context argument.
1713 1713 """
1714 1714
1715 1715 tr = None
1716 1716 p1, p2 = ctx.p1(), ctx.p2()
1717 1717 user = ctx.user()
1718 1718
1719 1719 lock = self.lock()
1720 1720 try:
1721 1721 tr = self.transaction("commit")
1722 1722 trp = weakref.proxy(tr)
1723 1723
1724 1724 if ctx.manifestnode():
1725 1725 # reuse an existing manifest revision
1726 1726 mn = ctx.manifestnode()
1727 1727 files = ctx.files()
1728 1728 elif ctx.files():
1729 1729 m1ctx = p1.manifestctx()
1730 1730 m2ctx = p2.manifestctx()
1731 1731 mctx = m1ctx.copy()
1732 1732
1733 1733 m = mctx.read()
1734 1734 m1 = m1ctx.read()
1735 1735 m2 = m2ctx.read()
1736 1736
1737 1737 # check in files
1738 1738 added = []
1739 1739 changed = []
1740 1740 removed = list(ctx.removed())
1741 1741 linkrev = len(self)
1742 1742 self.ui.note(_("committing files:\n"))
1743 1743 for f in sorted(ctx.modified() + ctx.added()):
1744 1744 self.ui.note(f + "\n")
1745 1745 try:
1746 1746 fctx = ctx[f]
1747 1747 if fctx is None:
1748 1748 removed.append(f)
1749 1749 else:
1750 1750 added.append(f)
1751 1751 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1752 1752 trp, changed)
1753 1753 m.setflag(f, fctx.flags())
1754 1754 except OSError as inst:
1755 1755 self.ui.warn(_("trouble committing %s!\n") % f)
1756 1756 raise
1757 1757 except IOError as inst:
1758 1758 errcode = getattr(inst, 'errno', errno.ENOENT)
1759 1759 if error or errcode and errcode != errno.ENOENT:
1760 1760 self.ui.warn(_("trouble committing %s!\n") % f)
1761 1761 raise
1762 1762
1763 1763 # update manifest
1764 1764 self.ui.note(_("committing manifest\n"))
1765 1765 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1766 1766 drop = [f for f in removed if f in m]
1767 1767 for f in drop:
1768 1768 del m[f]
1769 1769 mn = mctx.write(trp, linkrev,
1770 1770 p1.manifestnode(), p2.manifestnode(),
1771 1771 added, drop)
1772 1772 files = changed + removed
1773 1773 else:
1774 1774 mn = p1.manifestnode()
1775 1775 files = []
1776 1776
1777 1777 # update changelog
1778 1778 self.ui.note(_("committing changelog\n"))
1779 1779 self.changelog.delayupdate(tr)
1780 1780 n = self.changelog.add(mn, files, ctx.description(),
1781 1781 trp, p1.node(), p2.node(),
1782 1782 user, ctx.date(), ctx.extra().copy())
1783 1783 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1784 1784 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1785 1785 parent2=xp2)
1786 1786 # set the new commit to its proper phase
1787 1787 targetphase = subrepo.newcommitphase(self.ui, ctx)
1788 1788 if targetphase:
1789 1789 # retracting the boundary does not alter parent changesets.
1790 1790 # if a parent already has a higher phase, the resulting phase will
1791 1791 # be compliant anyway
1792 1792 #
1793 1793 # if minimal phase was 0 we don't need to retract anything
1794 1794 phases.retractboundary(self, tr, targetphase, [n])
1795 1795 tr.close()
1796 1796 return n
1797 1797 finally:
1798 1798 if tr:
1799 1799 tr.release()
1800 1800 lock.release()
1801 1801
1802 1802 @unfilteredmethod
1803 1803 def destroying(self):
1804 1804 '''Inform the repository that nodes are about to be destroyed.
1805 1805 Intended for use by strip and rollback, so there's a common
1806 1806 place for anything that has to be done before destroying history.
1807 1807
1808 1808 This is mostly useful for saving state that is in memory and waiting
1809 1809 to be flushed when the current lock is released. Because a call to
1810 1810 destroyed is imminent, the repo will be invalidated causing those
1811 1811 changes to stay in memory (waiting for the next unlock), or vanish
1812 1812 completely.
1813 1813 '''
1814 1814 # When using the same lock to commit and strip, the phasecache is left
1815 1815 # dirty after committing. Then when we strip, the repo is invalidated,
1816 1816 # causing those changes to disappear.
1817 1817 if '_phasecache' in vars(self):
1818 1818 self._phasecache.write()
1819 1819
1820 1820 @unfilteredmethod
1821 1821 def destroyed(self):
1822 1822 '''Inform the repository that nodes have been destroyed.
1823 1823 Intended for use by strip and rollback, so there's a common
1824 1824 place for anything that has to be done after destroying history.
1825 1825 '''
1826 1826 # When one tries to:
1827 1827 # 1) destroy nodes thus calling this method (e.g. strip)
1828 1828 # 2) use phasecache somewhere (e.g. commit)
1829 1829 #
1830 1830 # then 2) will fail because the phasecache contains nodes that were
1831 1831 # removed. We can either remove phasecache from the filecache,
1832 1832 # causing it to reload next time it is accessed, or simply filter
1833 1833 # the removed nodes now and write the updated cache.
1834 1834 self._phasecache.filterunknown(self)
1835 1835 self._phasecache.write()
1836 1836
1837 1837 # refresh all repository caches
1838 1838 self.updatecaches()
1839 1839
1840 1840 # Ensure the persistent tag cache is updated. Doing it now
1841 1841 # means that the tag cache only has to worry about destroyed
1842 1842 # heads immediately after a strip/rollback. That in turn
1843 1843 # guarantees that "cachetip == currenttip" (comparing both rev
1844 1844 # and node) always means no nodes have been added or destroyed.
1845 1845
1846 1846 # XXX this is suboptimal when qrefresh'ing: we strip the current
1847 1847 # head, refresh the tag cache, then immediately add a new head.
1848 1848 # But I think doing it this way is necessary for the "instant
1849 1849 # tag cache retrieval" case to work.
1850 1850 self.invalidate()
1851 1851
1852 1852 def walk(self, match, node=None):
1853 1853 '''
1854 1854 walk recursively through the directory tree or a given
1855 1855 changeset, finding all files matched by the match
1856 1856 function
1857 1857 '''
1858 1858 return self[node].walk(match)
1859 1859
1860 1860 def status(self, node1='.', node2=None, match=None,
1861 1861 ignored=False, clean=False, unknown=False,
1862 1862 listsubrepos=False):
1863 1863 '''a convenience method that calls node1.status(node2)'''
1864 1864 return self[node1].status(node2, match, ignored, clean, unknown,
1865 1865 listsubrepos)
1866 1866
1867 1867 def heads(self, start=None):
1868 1868 if start is None:
1869 1869 cl = self.changelog
1870 1870 headrevs = reversed(cl.headrevs())
1871 1871 return [cl.node(rev) for rev in headrevs]
1872 1872
1873 1873 heads = self.changelog.heads(start)
1874 1874 # sort the output in rev descending order
1875 1875 return sorted(heads, key=self.changelog.rev, reverse=True)
1876 1876
1877 1877 def branchheads(self, branch=None, start=None, closed=False):
1878 1878 '''return a (possibly filtered) list of heads for the given branch
1879 1879
1880 1880 Heads are returned in topological order, from newest to oldest.
1881 1881 If branch is None, use the dirstate branch.
1882 1882 If start is not None, return only heads reachable from start.
1883 1883 If closed is True, return heads that are marked as closed as well.
1884 1884 '''
1885 1885 if branch is None:
1886 1886 branch = self[None].branch()
1887 1887 branches = self.branchmap()
1888 1888 if branch not in branches:
1889 1889 return []
1890 1890 # the cache returns heads ordered lowest to highest
1891 1891 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1892 1892 if start is not None:
1893 1893 # filter out the heads that cannot be reached from startrev
1894 1894 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1895 1895 bheads = [h for h in bheads if h in fbheads]
1896 1896 return bheads
1897 1897
1898 1898 def branches(self, nodes):
1899 1899 if not nodes:
1900 1900 nodes = [self.changelog.tip()]
1901 1901 b = []
1902 1902 for n in nodes:
1903 1903 t = n
1904 1904 while True:
1905 1905 p = self.changelog.parents(n)
1906 1906 if p[1] != nullid or p[0] == nullid:
1907 1907 b.append((t, n, p[0], p[1]))
1908 1908 break
1909 1909 n = p[0]
1910 1910 return b
1911 1911
1912 1912 def between(self, pairs):
1913 1913 r = []
1914 1914
1915 1915 for top, bottom in pairs:
1916 1916 n, l, i = top, [], 0
1917 1917 f = 1
1918 1918
1919 1919 while n != bottom and n != nullid:
1920 1920 p = self.changelog.parents(n)[0]
1921 1921 if i == f:
1922 1922 l.append(n)
1923 1923 f = f * 2
1924 1924 n = p
1925 1925 i += 1
1926 1926
1927 1927 r.append(l)
1928 1928
1929 1929 return r
1930 1930
1931 1931 def checkpush(self, pushop):
1932 1932 """Extensions can override this function if additional checks have
1933 1933 to be performed before pushing, or call it if they override push
1934 1934 command.
1935 1935 """
1936 1936 pass
1937 1937
1938 1938 @unfilteredpropertycache
1939 1939 def prepushoutgoinghooks(self):
1940 1940 """Return util.hooks consists of a pushop with repo, remote, outgoing
1941 1941 methods, which are called before pushing changesets.
1942 1942 """
1943 1943 return util.hooks()
1944 1944
1945 1945 def pushkey(self, namespace, key, old, new):
1946 1946 try:
1947 1947 tr = self.currenttransaction()
1948 1948 hookargs = {}
1949 1949 if tr is not None:
1950 1950 hookargs.update(tr.hookargs)
1951 1951 hookargs['namespace'] = namespace
1952 1952 hookargs['key'] = key
1953 1953 hookargs['old'] = old
1954 1954 hookargs['new'] = new
1955 1955 self.hook('prepushkey', throw=True, **hookargs)
1956 1956 except error.HookAbort as exc:
1957 1957 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1958 1958 if exc.hint:
1959 1959 self.ui.write_err(_("(%s)\n") % exc.hint)
1960 1960 return False
1961 1961 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1962 1962 ret = pushkey.push(self, namespace, key, old, new)
1963 1963 def runhook():
1964 1964 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1965 1965 ret=ret)
1966 1966 self._afterlock(runhook)
1967 1967 return ret
1968 1968
1969 1969 def listkeys(self, namespace):
1970 1970 self.hook('prelistkeys', throw=True, namespace=namespace)
1971 1971 self.ui.debug('listing keys for "%s"\n' % namespace)
1972 1972 values = pushkey.list(self, namespace)
1973 1973 self.hook('listkeys', namespace=namespace, values=values)
1974 1974 return values
1975 1975
1976 1976 def debugwireargs(self, one, two, three=None, four=None, five=None):
1977 1977 '''used to test argument passing over the wire'''
1978 1978 return "%s %s %s %s %s" % (one, two, three, four, five)
1979 1979
1980 1980 def savecommitmessage(self, text):
1981 1981 fp = self.vfs('last-message.txt', 'wb')
1982 1982 try:
1983 1983 fp.write(text)
1984 1984 finally:
1985 1985 fp.close()
1986 1986 return self.pathto(fp.name[len(self.root) + 1:])
1987 1987
1988 1988 # used to avoid circular references so destructors work
1989 1989 def aftertrans(files):
1990 1990 renamefiles = [tuple(t) for t in files]
1991 1991 def a():
1992 1992 for vfs, src, dest in renamefiles:
1993 1993 # if src and dest refer to a same file, vfs.rename is a no-op,
1994 1994 # leaving both src and dest on disk. delete dest to make sure
1995 1995 # the rename couldn't be such a no-op.
1996 1996 vfs.tryunlink(dest)
1997 1997 try:
1998 1998 vfs.rename(src, dest)
1999 1999 except OSError: # journal file does not yet exist
2000 2000 pass
2001 2001 return a
2002 2002
2003 2003 def undoname(fn):
2004 2004 base, name = os.path.split(fn)
2005 2005 assert name.startswith('journal')
2006 2006 return os.path.join(base, name.replace('journal', 'undo', 1))
2007 2007
2008 2008 def instance(ui, path, create):
2009 2009 return localrepository(ui, util.urllocalpath(path), create)
2010 2010
2011 2011 def islocal(path):
2012 2012 return True
2013 2013
2014 2014 def newreporequirements(repo):
2015 2015 """Determine the set of requirements for a new local repository.
2016 2016
2017 2017 Extensions can wrap this function to specify custom requirements for
2018 2018 new repositories.
2019 2019 """
2020 2020 ui = repo.ui
2021 2021 requirements = {'revlogv1'}
2022 2022 if ui.configbool('format', 'usestore', True):
2023 2023 requirements.add('store')
2024 2024 if ui.configbool('format', 'usefncache', True):
2025 2025 requirements.add('fncache')
2026 2026 if ui.configbool('format', 'dotencode', True):
2027 2027 requirements.add('dotencode')
2028 2028
2029 2029 compengine = ui.config('experimental', 'format.compression', 'zlib')
2030 2030 if compengine not in util.compengines:
2031 2031 raise error.Abort(_('compression engine %s defined by '
2032 2032 'experimental.format.compression not available') %
2033 2033 compengine,
2034 2034 hint=_('run "hg debuginstall" to list available '
2035 2035 'compression engines'))
2036 2036
2037 2037 # zlib is the historical default and doesn't need an explicit requirement.
2038 2038 if compengine != 'zlib':
2039 2039 requirements.add('exp-compression-%s' % compengine)
2040 2040
2041 2041 if scmutil.gdinitconfig(ui):
2042 2042 requirements.add('generaldelta')
2043 2043 if ui.configbool('experimental', 'treemanifest', False):
2044 2044 requirements.add('treemanifest')
2045 2045 if ui.configbool('experimental', 'manifestv2', False):
2046 2046 requirements.add('manifestv2')
2047 2047
2048 2048 return requirements
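The commit() hunk above swaps the removed matcher query for its surviving counterpart: a merge may only be committed when the matcher matches everything, so the guard now asks `not match.always()` instead of `match.ispartial()`. A minimal, self-contained sketch of that guard, using hypothetical stand-in arguments (`is_merge` for `len(wctx.parents()) > 1`, `match_always` for `match.always()`) rather than Mercurial's real objects:

def check_merge_commit(force, is_merge, match_always):
    # Partial commits of a merge are rejected: either commit everything
    # (the matcher matches all files) or pass force.
    if not force and is_merge and not match_always:
        raise ValueError('cannot partially commit a merge '
                         '(do not specify files or patterns)')

# e.g. committing a merge with an explicit file list would abort:
# check_merge_commit(force=False, is_merge=True, match_always=False)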
@@ -1,797 +1,789 b''
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import os
12 12 import re
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 error,
17 17 pathutil,
18 18 util,
19 19 )
20 20
21 21 propertycache = util.propertycache
22 22
23 23 def _rematcher(regex):
24 24 '''compile the regexp with the best available regexp engine and return a
25 25 matcher function'''
26 26 m = util.re.compile(regex)
27 27 try:
28 28 # slightly faster, provided by facebook's re2 bindings
29 29 return m.test_match
30 30 except AttributeError:
31 31 return m.match
32 32
33 33 def _expandsets(kindpats, ctx, listsubrepos):
34 34 '''Returns the kindpats list with the 'set' patterns expanded.'''
35 35 fset = set()
36 36 other = []
37 37
38 38 for kind, pat, source in kindpats:
39 39 if kind == 'set':
40 40 if not ctx:
41 41 raise error.Abort(_("fileset expression with no context"))
42 42 s = ctx.getfileset(pat)
43 43 fset.update(s)
44 44
45 45 if listsubrepos:
46 46 for subpath in ctx.substate:
47 47 s = ctx.sub(subpath).getfileset(pat)
48 48 fset.update(subpath + '/' + f for f in s)
49 49
50 50 continue
51 51 other.append((kind, pat, source))
52 52 return fset, other
53 53
54 54 def _expandsubinclude(kindpats, root):
55 55 '''Returns the list of subinclude matcher args and the kindpats without the
56 56 subincludes in it.'''
57 57 relmatchers = []
58 58 other = []
59 59
60 60 for kind, pat, source in kindpats:
61 61 if kind == 'subinclude':
62 62 sourceroot = pathutil.dirname(util.normpath(source))
63 63 pat = util.pconvert(pat)
64 64 path = pathutil.join(sourceroot, pat)
65 65
66 66 newroot = pathutil.dirname(path)
67 67 matcherargs = (newroot, '', [], ['include:%s' % path])
68 68
69 69 prefix = pathutil.canonpath(root, root, newroot)
70 70 if prefix:
71 71 prefix += '/'
72 72 relmatchers.append((prefix, matcherargs))
73 73 else:
74 74 other.append((kind, pat, source))
75 75
76 76 return relmatchers, other
77 77
78 78 def _kindpatsalwaysmatch(kindpats):
79 79 """"Checks whether the kindspats match everything, as e.g.
80 80 'relpath:.' does.
81 81 """
82 82 for kind, pat, source in kindpats:
83 83 if pat != '' or kind not in ['relpath', 'glob']:
84 84 return False
85 85 return True
86 86
87 87 class match(object):
88 88 def __init__(self, root, cwd, patterns, include=None, exclude=None,
89 89 default='glob', exact=False, auditor=None, ctx=None,
90 90 listsubrepos=False, warn=None, badfn=None):
91 91 """build an object to match a set of file patterns
92 92
93 93 arguments:
94 94 root - the canonical root of the tree you're matching against
95 95 cwd - the current working directory, if relevant
96 96 patterns - patterns to find
97 97 include - patterns to include (unless they are excluded)
98 98 exclude - patterns to exclude (even if they are included)
99 99 default - if a pattern in patterns has no explicit type, assume this one
100 100 exact - patterns are actually filenames (include/exclude still apply)
101 101 warn - optional function used for printing warnings
102 102 badfn - optional bad() callback for this matcher instead of the default
103 103
104 104 a pattern is one of:
105 105 'glob:<glob>' - a glob relative to cwd
106 106 're:<regexp>' - a regular expression
107 107 'path:<path>' - a path relative to repository root, which is matched
108 108 recursively
109 109 'rootfilesin:<path>' - a path relative to repository root, which is
110 110 matched non-recursively (will not match subdirectories)
111 111 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
112 112 'relpath:<path>' - a path relative to cwd
113 113 'relre:<regexp>' - a regexp that needn't match the start of a name
114 114 'set:<fileset>' - a fileset expression
115 115 'include:<path>' - a file of patterns to read and include
116 116 'subinclude:<path>' - a file of patterns to match against files under
117 117 the same directory
118 118 '<something>' - a pattern of the specified default type
119 119 """
120 120 if include is None:
121 121 include = []
122 122 if exclude is None:
123 123 exclude = []
124 124
125 125 self._root = root
126 126 self._cwd = cwd
127 127 self._files = [] # exact files and roots of patterns
128 128 self._anypats = bool(include or exclude)
129 129 self._always = False
130 130 self._pathrestricted = bool(include or exclude or patterns)
131 131 self._warn = warn
132 132
133 133 # roots are directories which are recursively included/excluded.
134 134 self._includeroots = set()
135 135 self._excluderoots = set()
136 136 # dirs are directories which are non-recursively included.
137 137 self._includedirs = set()
138 138
139 139 if badfn is not None:
140 140 self.bad = badfn
141 141
142 142 matchfns = []
143 143 if include:
144 144 kindpats = self._normalize(include, 'glob', root, cwd, auditor)
145 145 self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
146 146 listsubrepos, root)
147 147 roots, dirs = _rootsanddirs(kindpats)
148 148 self._includeroots.update(roots)
149 149 self._includedirs.update(dirs)
150 150 matchfns.append(im)
151 151 if exclude:
152 152 kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
153 153 self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
154 154 listsubrepos, root)
155 155 if not _anypats(kindpats):
156 156 # Only consider recursive excludes as such - if a non-recursive
157 157 # exclude is used, we must still recurse into the excluded
158 158 # directory, at least to find subdirectories. In such a case,
159 159 # the regex still won't match the non-recursively-excluded
160 160 # files.
161 161 self._excluderoots.update(_roots(kindpats))
162 162 matchfns.append(lambda f: not em(f))
163 163 if exact:
164 164 if isinstance(patterns, list):
165 165 self._files = patterns
166 166 else:
167 167 self._files = list(patterns)
168 168 matchfns.append(self.exact)
169 169 elif patterns:
170 170 kindpats = self._normalize(patterns, default, root, cwd, auditor)
171 171 if not _kindpatsalwaysmatch(kindpats):
172 172 self._files = _explicitfiles(kindpats)
173 173 self._anypats = self._anypats or _anypats(kindpats)
174 174 self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
175 175 listsubrepos, root)
176 176 matchfns.append(pm)
177 177
178 178 if not matchfns:
179 179 m = util.always
180 180 self._always = True
181 181 elif len(matchfns) == 1:
182 182 m = matchfns[0]
183 183 else:
184 184 def m(f):
185 185 for matchfn in matchfns:
186 186 if not matchfn(f):
187 187 return False
188 188 return True
189 189
190 190 self.matchfn = m
191 191 self._fileroots = set(self._files)
192 192
193 193 def __call__(self, fn):
194 194 return self.matchfn(fn)
195 195 def __iter__(self):
196 196 for f in self._files:
197 197 yield f
198 198
199 199 # Callbacks related to how the matcher is used by dirstate.walk.
200 200 # Subscribers to these events must monkeypatch the matcher object.
201 201 def bad(self, f, msg):
202 202 '''Callback from dirstate.walk for each explicit file that can't be
203 203 found/accessed, with an error message.'''
204 204 pass
205 205
206 206 # If an explicitdir is set, it will be called when an explicitly listed
207 207 # directory is visited.
208 208 explicitdir = None
209 209
210 210 # If a traversedir is set, it will be called when a directory discovered
211 211 # by recursive traversal is visited.
212 212 traversedir = None
213 213
214 214 def abs(self, f):
215 215 '''Convert a repo path back to path that is relative to the root of the
216 216 matcher.'''
217 217 return f
218 218
219 219 def rel(self, f):
220 220 '''Convert repo path back to path that is relative to cwd of matcher.'''
221 221 return util.pathto(self._root, self._cwd, f)
222 222
223 223 def uipath(self, f):
224 224 '''Convert repo path to a display path. If patterns or -I/-X were used
225 225 to create this matcher, the display path will be relative to cwd.
226 226 Otherwise it is relative to the root of the repo.'''
227 227 return (self._pathrestricted and self.rel(f)) or self.abs(f)
228 228
229 229 def files(self):
230 230 '''Explicitly listed files or patterns or roots:
231 231 if no patterns or .always(): empty list,
232 232 if exact: list exact files,
233 233 if not .anypats(): list all files and dirs,
234 234 else: optimal roots'''
235 235 return self._files
236 236
237 237 @propertycache
238 238 def _dirs(self):
239 239 return set(util.dirs(self._fileroots)) | {'.'}
240 240
241 241 def visitdir(self, dir):
242 242 '''Decides whether a directory should be visited based on whether it
243 243 has potential matches in it or one of its subdirectories. This is
244 244 based on the match's primary, included, and excluded patterns.
245 245
246 246 Returns the string 'all' if the given directory and all subdirectories
247 247 should be visited. Otherwise returns True or False indicating whether
248 248 the given directory should be visited.
249 249
250 250 This function's behavior is undefined if it has returned False for
251 251 one of the dir's parent directories.
252 252 '''
253 253 if self.prefix() and dir in self._fileroots:
254 254 return 'all'
255 255 if dir in self._excluderoots:
256 256 return False
257 257 if ((self._includeroots or self._includedirs) and
258 258 '.' not in self._includeroots and
259 259 dir not in self._includeroots and
260 260 dir not in self._includedirs and
261 261 not any(parent in self._includeroots
262 262 for parent in util.finddirs(dir))):
263 263 return False
264 264 return (not self._fileroots or
265 265 '.' in self._fileroots or
266 266 dir in self._fileroots or
267 267 dir in self._dirs or
268 268 any(parentdir in self._fileroots
269 269 for parentdir in util.finddirs(dir)))
270 270
271 271 def exact(self, f):
272 272 '''Returns True if f is in .files().'''
273 273 return f in self._fileroots
274 274
275 275 def anypats(self):
276 276 '''Matcher uses patterns or include/exclude.'''
277 277 return self._anypats
278 278
279 279 def always(self):
280 280 '''Matcher will match everything and .files() will be empty
281 281 - optimization might be possible and necessary.'''
282 282 return self._always
283 283
284 def ispartial(self):
285 '''True if the matcher won't always match.
286
287 Although it's just the inverse of _always in this implementation,
288 an extension such as narrowhg might make it return something
289 slightly different.'''
290 return not self._always
291
292 284 def isexact(self):
293 285 return self.matchfn == self.exact
294 286
295 287 def prefix(self):
296 288 return not self.always() and not self.isexact() and not self.anypats()
297 289
298 290 def _normalize(self, patterns, default, root, cwd, auditor):
299 291 '''Convert 'kind:pat' from the patterns list to tuples with kind and
300 292 normalized and rooted patterns and with listfiles expanded.'''
301 293 kindpats = []
302 294 for kind, pat in [_patsplit(p, default) for p in patterns]:
303 295 if kind in ('glob', 'relpath'):
304 296 pat = pathutil.canonpath(root, cwd, pat, auditor)
305 297 elif kind in ('relglob', 'path', 'rootfilesin'):
306 298 pat = util.normpath(pat)
307 299 elif kind in ('listfile', 'listfile0'):
308 300 try:
309 301 files = util.readfile(pat)
310 302 if kind == 'listfile0':
311 303 files = files.split('\0')
312 304 else:
313 305 files = files.splitlines()
314 306 files = [f for f in files if f]
315 307 except EnvironmentError:
316 308 raise error.Abort(_("unable to read file list (%s)") % pat)
317 309 for k, p, source in self._normalize(files, default, root, cwd,
318 310 auditor):
319 311 kindpats.append((k, p, pat))
320 312 continue
321 313 elif kind == 'include':
322 314 try:
323 315 fullpath = os.path.join(root, util.localpath(pat))
324 316 includepats = readpatternfile(fullpath, self._warn)
325 317 for k, p, source in self._normalize(includepats, default,
326 318 root, cwd, auditor):
327 319 kindpats.append((k, p, source or pat))
328 320 except error.Abort as inst:
329 321 raise error.Abort('%s: %s' % (pat, inst[0]))
330 322 except IOError as inst:
331 323 if self._warn:
332 324 self._warn(_("skipping unreadable pattern file "
333 325 "'%s': %s\n") % (pat, inst.strerror))
334 326 continue
335 327 # else: re or relre - which cannot be normalized
336 328 kindpats.append((kind, pat, ''))
337 329 return kindpats
338 330
339 331 def exact(root, cwd, files, badfn=None):
340 332 return match(root, cwd, files, exact=True, badfn=badfn)
341 333
342 334 def always(root, cwd):
343 335 return match(root, cwd, [])
344 336
345 337 def badmatch(match, badfn):
346 338 """Make a copy of the given matcher, replacing its bad method with the given
347 339 one.
348 340 """
349 341 m = copy.copy(match)
350 342 m.bad = badfn
351 343 return m
352 344
353 345 class subdirmatcher(match):
354 346 """Adapt a matcher to work on a subdirectory only.
355 347
356 348 The paths are remapped to remove/insert the path as needed:
357 349
358 350 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
359 351 >>> m2 = subdirmatcher('sub', m1)
360 352 >>> bool(m2('a.txt'))
361 353 False
362 354 >>> bool(m2('b.txt'))
363 355 True
364 356 >>> bool(m2.matchfn('a.txt'))
365 357 False
366 358 >>> bool(m2.matchfn('b.txt'))
367 359 True
368 360 >>> m2.files()
369 361 ['b.txt']
370 362 >>> m2.exact('b.txt')
371 363 True
372 364 >>> util.pconvert(m2.rel('b.txt'))
373 365 'sub/b.txt'
374 366 >>> def bad(f, msg):
375 367 ... print "%s: %s" % (f, msg)
376 368 >>> m1.bad = bad
377 369 >>> m2.bad('x.txt', 'No such file')
378 370 sub/x.txt: No such file
379 371 >>> m2.abs('c.txt')
380 372 'sub/c.txt'
381 373 """
382 374
383 375 def __init__(self, path, matcher):
384 376 self._root = matcher._root
385 377 self._cwd = matcher._cwd
386 378 self._path = path
387 379 self._matcher = matcher
388 380 self._always = matcher._always
389 381 self._pathrestricted = matcher._pathrestricted
390 382
391 383 self._files = [f[len(path) + 1:] for f in matcher._files
392 384 if f.startswith(path + "/")]
393 385
394 386 # If the parent repo had a path to this subrepo and no patterns are
395 387 # specified, this submatcher always matches.
396 388 if not self._always and not matcher._anypats:
397 389 self._always = any(f == path for f in matcher._files)
398 390
399 391 self._anypats = matcher._anypats
400 392 # Some information is lost in the superclass's constructor, so we
401 393 # can not accurately create the matching function for the subdirectory
402 394 # from the inputs. Instead, we override matchfn() and visitdir() to
403 395 # call the original matcher with the subdirectory path prepended.
404 396 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
405 397 def visitdir(dir):
406 398 if dir == '.':
407 399 return matcher.visitdir(self._path)
408 400 return matcher.visitdir(self._path + "/" + dir)
409 401 self.visitdir = visitdir
410 402 self._fileroots = set(self._files)
411 403
412 404 def abs(self, f):
413 405 return self._matcher.abs(self._path + "/" + f)
414 406
415 407 def bad(self, f, msg):
416 408 self._matcher.bad(self._path + "/" + f, msg)
417 409
418 410 def rel(self, f):
419 411 return self._matcher.rel(self._path + "/" + f)
420 412
421 413 class icasefsmatcher(match):
422 414 """A matcher for wdir on case insensitive filesystems, which normalizes the
423 415 given patterns to the case in the filesystem.
424 416 """
425 417
426 418 def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
427 419 ctx, listsubrepos=False, badfn=None):
428 420 init = super(icasefsmatcher, self).__init__
429 421 self._dirstate = ctx.repo().dirstate
430 422 self._dsnormalize = self._dirstate.normalize
431 423
432 424 init(root, cwd, patterns, include, exclude, default, auditor=auditor,
433 425 ctx=ctx, listsubrepos=listsubrepos, badfn=badfn)
434 426
435 427 # m.exact(file) must be based off of the actual user input, otherwise
436 428 # inexact case matches are treated as exact, and not noted without -v.
437 429 if self._files:
438 430 roots, dirs = _rootsanddirs(self._kp)
439 431 self._fileroots = set(roots)
440 432 self._fileroots.update(dirs)
441 433
442 434 def _normalize(self, patterns, default, root, cwd, auditor):
443 435 self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
444 436 root, cwd, auditor)
445 437 kindpats = []
446 438 for kind, pats, source in self._kp:
447 439 if kind not in ('re', 'relre'): # regex can't be normalized
448 440 p = pats
449 441 pats = self._dsnormalize(pats)
450 442
451 443 # Preserve the original to handle a case only rename.
452 444 if p != pats and p in self._dirstate:
453 445 kindpats.append((kind, p, source))
454 446
455 447 kindpats.append((kind, pats, source))
456 448 return kindpats
457 449
458 450 def patkind(pattern, default=None):
459 451 '''If pattern is 'kind:pat' with a known kind, return kind.'''
460 452 return _patsplit(pattern, default)[0]
461 453
462 454 def _patsplit(pattern, default):
463 455 """Split a string into the optional pattern kind prefix and the actual
464 456 pattern."""
465 457 if ':' in pattern:
466 458 kind, pat = pattern.split(':', 1)
467 459 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
468 460 'listfile', 'listfile0', 'set', 'include', 'subinclude',
469 461 'rootfilesin'):
470 462 return kind, pat
471 463 return default, pattern
472 464
473 465 def _globre(pat):
474 466 r'''Convert an extended glob string to a regexp string.
475 467
476 468 >>> print _globre(r'?')
477 469 .
478 470 >>> print _globre(r'*')
479 471 [^/]*
480 472 >>> print _globre(r'**')
481 473 .*
482 474 >>> print _globre(r'**/a')
483 475 (?:.*/)?a
484 476 >>> print _globre(r'a/**/b')
485 477 a\/(?:.*/)?b
486 478 >>> print _globre(r'[a*?!^][^b][!c]')
487 479 [a*?!^][\^b][^c]
488 480 >>> print _globre(r'{a,b}')
489 481 (?:a|b)
490 482 >>> print _globre(r'.\*\?')
491 483 \.\*\?
492 484 '''
493 485 i, n = 0, len(pat)
494 486 res = ''
495 487 group = 0
496 488 escape = util.re.escape
497 489 def peek():
498 490 return i < n and pat[i:i + 1]
499 491 while i < n:
500 492 c = pat[i:i + 1]
501 493 i += 1
502 494 if c not in '*?[{},\\':
503 495 res += escape(c)
504 496 elif c == '*':
505 497 if peek() == '*':
506 498 i += 1
507 499 if peek() == '/':
508 500 i += 1
509 501 res += '(?:.*/)?'
510 502 else:
511 503 res += '.*'
512 504 else:
513 505 res += '[^/]*'
514 506 elif c == '?':
515 507 res += '.'
516 508 elif c == '[':
517 509 j = i
518 510 if j < n and pat[j:j + 1] in '!]':
519 511 j += 1
520 512 while j < n and pat[j:j + 1] != ']':
521 513 j += 1
522 514 if j >= n:
523 515 res += '\\['
524 516 else:
525 517 stuff = pat[i:j].replace('\\','\\\\')
526 518 i = j + 1
527 519 if stuff[0:1] == '!':
528 520 stuff = '^' + stuff[1:]
529 521 elif stuff[0:1] == '^':
530 522 stuff = '\\' + stuff
531 523 res = '%s[%s]' % (res, stuff)
532 524 elif c == '{':
533 525 group += 1
534 526 res += '(?:'
535 527 elif c == '}' and group:
536 528 res += ')'
537 529 group -= 1
538 530 elif c == ',' and group:
539 531 res += '|'
540 532 elif c == '\\':
541 533 p = peek()
542 534 if p:
543 535 i += 1
544 536 res += escape(p)
545 537 else:
546 538 res += escape(c)
547 539 else:
548 540 res += escape(c)
549 541 return res
550 542
551 543 def _regex(kind, pat, globsuffix):
552 544 '''Convert a (normalized) pattern of any kind into a regular expression.
553 545 globsuffix is appended to the regexp of globs.'''
554 546 if not pat:
555 547 return ''
556 548 if kind == 're':
557 549 return pat
558 550 if kind == 'path':
559 551 if pat == '.':
560 552 return ''
561 553 return '^' + util.re.escape(pat) + '(?:/|$)'
562 554 if kind == 'rootfilesin':
563 555 if pat == '.':
564 556 escaped = ''
565 557 else:
566 558 # Pattern is a directory name.
567 559 escaped = util.re.escape(pat) + '/'
568 560 # Anything after the pattern must be a non-directory.
569 561 return '^' + escaped + '[^/]+$'
570 562 if kind == 'relglob':
571 563 return '(?:|.*/)' + _globre(pat) + globsuffix
572 564 if kind == 'relpath':
573 565 return util.re.escape(pat) + '(?:/|$)'
574 566 if kind == 'relre':
575 567 if pat.startswith('^'):
576 568 return pat
577 569 return '.*' + pat
578 570 return _globre(pat) + globsuffix
579 571
580 572 def _buildmatch(ctx, kindpats, globsuffix, listsubrepos, root):
581 573 '''Return regexp string and a matcher function for kindpats.
582 574 globsuffix is appended to the regexp of globs.'''
583 575 matchfuncs = []
584 576
585 577 subincludes, kindpats = _expandsubinclude(kindpats, root)
586 578 if subincludes:
587 579 submatchers = {}
588 580 def matchsubinclude(f):
589 581 for prefix, matcherargs in subincludes:
590 582 if f.startswith(prefix):
591 583 mf = submatchers.get(prefix)
592 584 if mf is None:
593 585 mf = match(*matcherargs)
594 586 submatchers[prefix] = mf
595 587
596 588 if mf(f[len(prefix):]):
597 589 return True
598 590 return False
599 591 matchfuncs.append(matchsubinclude)
600 592
601 593 fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
602 594 if fset:
603 595 matchfuncs.append(fset.__contains__)
604 596
605 597 regex = ''
606 598 if kindpats:
607 599 regex, mf = _buildregexmatch(kindpats, globsuffix)
608 600 matchfuncs.append(mf)
609 601
610 602 if len(matchfuncs) == 1:
611 603 return regex, matchfuncs[0]
612 604 else:
613 605 return regex, lambda f: any(mf(f) for mf in matchfuncs)
614 606
615 607 def _buildregexmatch(kindpats, globsuffix):
616 608 """Build a match function from a list of kinds and kindpats,
617 609 return regexp string and a matcher function."""
618 610 try:
619 611 regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
620 612 for (k, p, s) in kindpats])
621 613 if len(regex) > 20000:
622 614 raise OverflowError
623 615 return regex, _rematcher(regex)
624 616 except OverflowError:
625 617 # We're using a Python with a tiny regex engine and we
626 618 # made it explode, so we'll divide the pattern list in two
627 619 # until it works
628 620 l = len(kindpats)
629 621 if l < 2:
630 622 raise
631 623 regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
632 624 regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
633 625 return regex, lambda s: a(s) or b(s)
634 626 except re.error:
635 627 for k, p, s in kindpats:
636 628 try:
637 629 _rematcher('(?:%s)' % _regex(k, p, globsuffix))
638 630 except re.error:
639 631 if s:
640 632 raise error.Abort(_("%s: invalid pattern (%s): %s") %
641 633 (s, k, p))
642 634 else:
643 635 raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
644 636 raise error.Abort(_("invalid pattern"))
645 637
646 638 def _patternrootsanddirs(kindpats):
647 639 '''Returns roots and directories corresponding to each pattern.
648 640
649 641 This calculates the roots and directories exactly matching the patterns and
650 642 returns a tuple of (roots, dirs) for each. It does not return other
651 643 directories which may also need to be considered, like the parent
652 644 directories.
653 645 '''
654 646 r = []
655 647 d = []
656 648 for kind, pat, source in kindpats:
657 649 if kind == 'glob': # find the non-glob prefix
658 650 root = []
659 651 for p in pat.split('/'):
660 652 if '[' in p or '{' in p or '*' in p or '?' in p:
661 653 break
662 654 root.append(p)
663 655 r.append('/'.join(root) or '.')
664 656 elif kind in ('relpath', 'path'):
665 657 r.append(pat or '.')
666 658 elif kind in ('rootfilesin',):
667 659 d.append(pat or '.')
668 660 else: # relglob, re, relre
669 661 r.append('.')
670 662 return r, d
671 663
672 664 def _roots(kindpats):
673 665 '''Returns root directories to match recursively from the given patterns.'''
674 666 roots, dirs = _patternrootsanddirs(kindpats)
675 667 return roots
676 668
677 669 def _rootsanddirs(kindpats):
678 670 '''Returns roots and exact directories from patterns.
679 671
680 672 roots are directories to match recursively, whereas exact directories should
681 673 be matched non-recursively. The returned (roots, dirs) tuple will also
682 674 include directories that need to be implicitly considered as either, such as
683 675 parent directories.
684 676
685 677 >>> _rootsanddirs(\
686 678 [('glob', 'g/h/*', ''), ('glob', 'g/h', ''), ('glob', 'g*', '')])
687 679 (['g/h', 'g/h', '.'], ['g', '.'])
688 680 >>> _rootsanddirs(\
689 681 [('rootfilesin', 'g/h', ''), ('rootfilesin', '', '')])
690 682 ([], ['g/h', '.', 'g', '.'])
691 683 >>> _rootsanddirs(\
692 684 [('relpath', 'r', ''), ('path', 'p/p', ''), ('path', '', '')])
693 685 (['r', 'p/p', '.'], ['p', '.'])
694 686 >>> _rootsanddirs(\
695 687 [('relglob', 'rg*', ''), ('re', 're/', ''), ('relre', 'rr', '')])
696 688 (['.', '.', '.'], ['.'])
697 689 '''
698 690 r, d = _patternrootsanddirs(kindpats)
699 691
700 692 # Append the parents as non-recursive/exact directories, since they must be
701 693 # scanned to get to either the roots or the other exact directories.
702 694 d.extend(util.dirs(d))
703 695 d.extend(util.dirs(r))
704 696 # util.dirs() does not include the root directory, so add it manually
705 697 d.append('.')
706 698
707 699 return r, d
708 700
709 701 def _explicitfiles(kindpats):
710 702 '''Returns the potential explicit filenames from the patterns.
711 703
712 704 >>> _explicitfiles([('path', 'foo/bar', '')])
713 705 ['foo/bar']
714 706 >>> _explicitfiles([('rootfilesin', 'foo/bar', '')])
715 707 []
716 708 '''
717 709 # Keep only the pattern kinds where one can specify filenames (vs only
718 710 # directory names).
719 711 filable = [kp for kp in kindpats if kp[0] not in ('rootfilesin',)]
720 712 return _roots(filable)
721 713
722 714 def _anypats(kindpats):
723 715 for kind, pat, source in kindpats:
724 716 if kind in ('glob', 're', 'relglob', 'relre', 'set', 'rootfilesin'):
725 717 return True
726 718
727 719 _commentre = None
728 720
729 721 def readpatternfile(filepath, warn, sourceinfo=False):
730 722 '''parse a pattern file, returning a list of
731 723 patterns. These patterns should be given to compile()
732 724 to be validated and converted into a match function.
733 725
734 726 trailing white space is dropped.
735 727 the escape character is backslash.
736 728 comments start with #.
737 729 empty lines are skipped.
738 730
739 731 lines can be of the following formats:
740 732
741 733 syntax: regexp # defaults following lines to non-rooted regexps
742 734 syntax: glob # defaults following lines to non-rooted globs
743 735 re:pattern # non-rooted regular expression
744 736 glob:pattern # non-rooted glob
745 737 pattern # pattern of the current default type
746 738
747 739 if sourceinfo is set, returns a list of tuples:
748 740 (pattern, lineno, originalline). This is useful to debug ignore patterns.
749 741 '''
750 742
751 743 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
752 744 'include': 'include', 'subinclude': 'subinclude'}
753 745 syntax = 'relre:'
754 746 patterns = []
755 747
756 748 fp = open(filepath, 'rb')
757 749 for lineno, line in enumerate(util.iterfile(fp), start=1):
758 750 if "#" in line:
759 751 global _commentre
760 752 if not _commentre:
761 753 _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
762 754 # remove comments prefixed by an even number of escapes
763 755 m = _commentre.search(line)
764 756 if m:
765 757 line = line[:m.end(1)]
766 758 # fixup properly escaped comments that survived the above
767 759 line = line.replace("\\#", "#")
768 760 line = line.rstrip()
769 761 if not line:
770 762 continue
771 763
772 764 if line.startswith('syntax:'):
773 765 s = line[7:].strip()
774 766 try:
775 767 syntax = syntaxes[s]
776 768 except KeyError:
777 769 if warn:
778 770 warn(_("%s: ignoring invalid syntax '%s'\n") %
779 771 (filepath, s))
780 772 continue
781 773
782 774 linesyntax = syntax
783 775 for s, rels in syntaxes.iteritems():
784 776 if line.startswith(rels):
785 777 linesyntax = rels
786 778 line = line[len(rels):]
787 779 break
788 780 elif line.startswith(s+':'):
789 781 linesyntax = rels
790 782 line = line[len(s) + 1:]
791 783 break
792 784 if sourceinfo:
793 785 patterns.append((linesyntax + line, lineno, line))
794 786 else:
795 787 patterns.append(linesyntax + line)
796 788 fp.close()
797 789 return patterns
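For reference, a small illustration of the matcher predicate that callers now test directly in place of the removed ispartial(): with no patterns and no -I/-X the matcher reports always() == True, and with explicit patterns it does not. This is a sketch assuming a Mercurial source checkout of this era (Python 2) is on the import path; the root 'root' and the file names are placeholders borrowed from the subdirmatcher doctest above.

from mercurial import match as matchmod

m_all = matchmod.match('root', '', [])                       # no patterns, no -I/-X
m_some = matchmod.match('root', '', ['a.txt', 'sub/b.txt'])  # explicit patterns

assert m_all.always()        # matches everything; callers now test this directly
assert not m_some.always()   # the condition the old ispartial() used to report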