localrepo: deprecate 'repo.opener' (API)...
Pierre-Yves David
r31148:3eaff87a default
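The change removes the 'self.opener = self.vfs' alias assigned in __init__ and replaces it with a property that forwards to 'repo.vfs' while emitting a deprecation warning, mirroring the existing 'wopener' property that forwards to 'repo.wvfs'. Below is a minimal, self-contained sketch of that pattern, with warnings.warn standing in for Mercurial's ui.deprecwarn and a simplified vfs class (both are stand-ins for illustration, not Mercurial's actual API):

    import warnings

    class vfs(object):
        """simplified stand-in for scmutil.vfs"""
        def __init__(self, base):
            self.base = base

    class repo(object):
        def __init__(self, path):
            self.vfs = vfs(path)  # the canonical accessor

        @property
        def opener(self):
            # the old spelling keeps working, but callers are nudged to migrate
            warnings.warn("use 'repo.vfs' instead of 'repo.opener'",
                          DeprecationWarning, stacklevel=2)
            return self.vfs

    r = repo('/tmp/example')  # illustrative path
    assert r.opener is r.vfs  # the same object is returned, with a warning

Callers migrate by writing 'repo.vfs' wherever they previously wrote 'repo.opener'; the property keeps third-party code working until the removal deadline passed to deprecwarn ('4.2' in the diff below).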
@@ -1,2061 +1,2065 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 color,
32 32 context,
33 33 dirstate,
34 34 dirstateguard,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 repoview,
53 53 revset,
54 54 revsetlang,
55 55 scmutil,
56 56 store,
57 57 subrepo,
58 58 tags as tagsmod,
59 59 transaction,
60 60 txnutil,
61 61 util,
62 62 )
63 63
64 64 release = lockmod.release
65 65 urlerr = util.urlerr
66 66 urlreq = util.urlreq
67 67
68 68 class repofilecache(scmutil.filecache):
69 69 """All filecache usage on repo are done for logic that should be unfiltered
70 70 """
71 71
72 72 def __get__(self, repo, type=None):
73 73 if repo is None:
74 74 return self
75 75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
76 76 def __set__(self, repo, value):
77 77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
78 78 def __delete__(self, repo):
79 79 return super(repofilecache, self).__delete__(repo.unfiltered())
80 80
81 81 class storecache(repofilecache):
82 82 """filecache for files in the store"""
83 83 def join(self, obj, fname):
84 84 return obj.sjoin(fname)
85 85
86 86 class unfilteredpropertycache(util.propertycache):
87 87 """propertycache that apply to unfiltered repo only"""
88 88
89 89 def __get__(self, repo, type=None):
90 90 unfi = repo.unfiltered()
91 91 if unfi is repo:
92 92 return super(unfilteredpropertycache, self).__get__(unfi)
93 93 return getattr(unfi, self.name)
94 94
95 95 class filteredpropertycache(util.propertycache):
96 96 """propertycache that must take filtering in account"""
97 97
98 98 def cachevalue(self, obj, value):
99 99 object.__setattr__(obj, self.name, value)
100 100
101 101
102 102 def hasunfilteredcache(repo, name):
103 103 """check if a repo has an unfilteredpropertycache value for <name>"""
104 104 return name in vars(repo.unfiltered())
105 105
106 106 def unfilteredmethod(orig):
107 107 """decorate method that always need to be run on unfiltered version"""
108 108 def wrapper(repo, *args, **kwargs):
109 109 return orig(repo.unfiltered(), *args, **kwargs)
110 110 return wrapper
111 111
112 112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
113 113 'unbundle'))
114 114 legacycaps = moderncaps.union(set(['changegroupsubset']))
115 115
116 116 class localpeer(peer.peerrepository):
117 117 '''peer for a local repo; reflects only the most recent API'''
118 118
119 119 def __init__(self, repo, caps=moderncaps):
120 120 peer.peerrepository.__init__(self)
121 121 self._repo = repo.filtered('served')
122 122 self.ui = repo.ui
123 123 self._caps = repo._restrictcapabilities(caps)
124 124 self.requirements = repo.requirements
125 125 self.supportedformats = repo.supportedformats
126 126
127 127 def close(self):
128 128 self._repo.close()
129 129
130 130 def _capabilities(self):
131 131 return self._caps
132 132
133 133 def local(self):
134 134 return self._repo
135 135
136 136 def canpush(self):
137 137 return True
138 138
139 139 def url(self):
140 140 return self._repo.url()
141 141
142 142 def lookup(self, key):
143 143 return self._repo.lookup(key)
144 144
145 145 def branchmap(self):
146 146 return self._repo.branchmap()
147 147
148 148 def heads(self):
149 149 return self._repo.heads()
150 150
151 151 def known(self, nodes):
152 152 return self._repo.known(nodes)
153 153
154 154 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
155 155 **kwargs):
156 156 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
157 157 common=common, bundlecaps=bundlecaps,
158 158 **kwargs)
159 159 cb = util.chunkbuffer(chunks)
160 160
161 161 if bundlecaps is not None and 'HG20' in bundlecaps:
162 162 # When requesting a bundle2, getbundle returns a stream to make the
163 163 # wire level function happier. We need to build a proper object
164 164 # from it in local peer.
165 165 return bundle2.getunbundler(self.ui, cb)
166 166 else:
167 167 return changegroup.getunbundler('01', cb, None)
168 168
169 169 # TODO We might want to move the next two calls into legacypeer and add
170 170 # unbundle instead.
171 171
172 172 def unbundle(self, cg, heads, url):
173 173 """apply a bundle on a repo
174 174
175 175 This function handles the repo locking itself."""
176 176 try:
177 177 try:
178 178 cg = exchange.readbundle(self.ui, cg, None)
179 179 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
180 180 if util.safehasattr(ret, 'getchunks'):
181 181 # This is a bundle20 object, turn it into an unbundler.
182 182 # This little dance should be dropped eventually when the
183 183 # API is finally improved.
184 184 stream = util.chunkbuffer(ret.getchunks())
185 185 ret = bundle2.getunbundler(self.ui, stream)
186 186 return ret
187 187 except Exception as exc:
188 188 # If the exception contains output salvaged from a bundle2
189 189 # reply, we need to make sure it is printed before continuing
190 190 # to fail. So we build a bundle2 with such output and consume
191 191 # it directly.
192 192 #
193 193 # This is not very elegant but allows a "simple" solution for
194 194 # issue4594
195 195 output = getattr(exc, '_bundle2salvagedoutput', ())
196 196 if output:
197 197 bundler = bundle2.bundle20(self._repo.ui)
198 198 for out in output:
199 199 bundler.addpart(out)
200 200 stream = util.chunkbuffer(bundler.getchunks())
201 201 b = bundle2.getunbundler(self.ui, stream)
202 202 bundle2.processbundle(self._repo, b)
203 203 raise
204 204 except error.PushRaced as exc:
205 205 raise error.ResponseError(_('push failed:'), str(exc))
206 206
207 207 def lock(self):
208 208 return self._repo.lock()
209 209
210 210 def addchangegroup(self, cg, source, url):
211 211 return cg.apply(self._repo, source, url)
212 212
213 213 def pushkey(self, namespace, key, old, new):
214 214 return self._repo.pushkey(namespace, key, old, new)
215 215
216 216 def listkeys(self, namespace):
217 217 return self._repo.listkeys(namespace)
218 218
219 219 def debugwireargs(self, one, two, three=None, four=None, five=None):
220 220 '''used to test argument passing over the wire'''
221 221 return "%s %s %s %s %s" % (one, two, three, four, five)
222 222
223 223 class locallegacypeer(localpeer):
224 224 '''peer extension which implements legacy methods too; used for tests with
225 225 restricted capabilities'''
226 226
227 227 def __init__(self, repo):
228 228 localpeer.__init__(self, repo, caps=legacycaps)
229 229
230 230 def branches(self, nodes):
231 231 return self._repo.branches(nodes)
232 232
233 233 def between(self, pairs):
234 234 return self._repo.between(pairs)
235 235
236 236 def changegroup(self, basenodes, source):
237 237 return changegroup.changegroup(self._repo, basenodes, source)
238 238
239 239 def changegroupsubset(self, bases, heads, source):
240 240 return changegroup.changegroupsubset(self._repo, bases, heads, source)
241 241
242 242 class localrepository(object):
243 243
244 244 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
245 245 'manifestv2'))
246 246 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
247 247 'relshared', 'dotencode'))
248 248 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
249 249 filtername = None
250 250
251 251 # a list of (ui, featureset) functions.
252 252 # only functions defined in modules of enabled extensions are invoked
253 253 featuresetupfuncs = set()
254 254
255 255 def __init__(self, baseui, path, create=False):
256 256 self.requirements = set()
257 257 # vfs to access the working copy
258 258 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
259 259 # vfs to access the content of the repository
260 260 self.vfs = None
261 261 # vfs to access the store part of the repository
262 262 self.svfs = None
263 263 self.root = self.wvfs.base
264 264 self.path = self.wvfs.join(".hg")
265 265 self.origroot = path
266 266 self.auditor = pathutil.pathauditor(self.root, self._checknested)
267 267 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
268 268 realfs=False)
269 269 self.vfs = scmutil.vfs(self.path)
270 self.opener = self.vfs
271 270 self.baseui = baseui
272 271 self.ui = baseui.copy()
273 272 self.ui.copy = baseui.copy # prevent copying repo configuration
274 273 # A list of callbacks to shape the phase if no data were found.
275 274 # Callbacks are in the form: func(repo, roots) --> processed root.
276 275 # This list is to be filled by extensions during repo setup
277 276 self._phasedefaults = []
278 277 try:
279 278 self.ui.readconfig(self.join("hgrc"), self.root)
280 279 self._loadextensions()
281 280 except IOError:
282 281 pass
283 282
284 283 if self.featuresetupfuncs:
285 284 self.supported = set(self._basesupported) # use private copy
286 285 extmods = set(m.__name__ for n, m
287 286 in extensions.extensions(self.ui))
288 287 for setupfunc in self.featuresetupfuncs:
289 288 if setupfunc.__module__ in extmods:
290 289 setupfunc(self.ui, self.supported)
291 290 else:
292 291 self.supported = self._basesupported
293 292 color.setup(self.ui)
294 293
295 294 # Add compression engines.
296 295 for name in util.compengines:
297 296 engine = util.compengines[name]
298 297 if engine.revlogheader():
299 298 self.supported.add('exp-compression-%s' % name)
300 299
301 300 if not self.vfs.isdir():
302 301 if create:
303 302 self.requirements = newreporequirements(self)
304 303
305 304 if not self.wvfs.exists():
306 305 self.wvfs.makedirs()
307 306 self.vfs.makedir(notindexed=True)
308 307
309 308 if 'store' in self.requirements:
310 309 self.vfs.mkdir("store")
311 310
312 311 # create an invalid changelog
313 312 self.vfs.append(
314 313 "00changelog.i",
315 314 '\0\0\0\2' # represents revlogv2
316 315 ' dummy changelog to prevent using the old repo layout'
317 316 )
318 317 else:
319 318 raise error.RepoError(_("repository %s not found") % path)
320 319 elif create:
321 320 raise error.RepoError(_("repository %s already exists") % path)
322 321 else:
323 322 try:
324 323 self.requirements = scmutil.readrequires(
325 324 self.vfs, self.supported)
326 325 except IOError as inst:
327 326 if inst.errno != errno.ENOENT:
328 327 raise
329 328
330 329 self.sharedpath = self.path
331 330 try:
332 331 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
333 332 if 'relshared' in self.requirements:
334 333 sharedpath = self.vfs.join(sharedpath)
335 334 vfs = scmutil.vfs(sharedpath, realpath=True)
336 335
337 336 s = vfs.base
338 337 if not vfs.exists():
339 338 raise error.RepoError(
340 339 _('.hg/sharedpath points to nonexistent directory %s') % s)
341 340 self.sharedpath = s
342 341 except IOError as inst:
343 342 if inst.errno != errno.ENOENT:
344 343 raise
345 344
346 345 self.store = store.store(
347 346 self.requirements, self.sharedpath, scmutil.vfs)
348 347 self.spath = self.store.path
349 348 self.svfs = self.store.vfs
350 349 self.sjoin = self.store.join
351 350 self.vfs.createmode = self.store.createmode
352 351 self._applyopenerreqs()
353 352 if create:
354 353 self._writerequirements()
355 354
356 355 self._dirstatevalidatewarned = False
357 356
358 357 self._branchcaches = {}
359 358 self._revbranchcache = None
360 359 self.filterpats = {}
361 360 self._datafilters = {}
362 361 self._transref = self._lockref = self._wlockref = None
363 362
364 363 # A cache for various files under .hg/ that tracks file changes,
365 364 # (used by the filecache decorator)
366 365 #
367 366 # Maps a property name to its util.filecacheentry
368 367 self._filecache = {}
369 368
371 370 # holds sets of revisions to be filtered
371 370 # should be cleared when something might have changed the filter value:
372 371 # - new changesets,
373 372 # - phase change,
374 373 # - new obsolescence marker,
375 374 # - working directory parent change,
376 375 # - bookmark changes
377 376 self.filteredrevcache = {}
378 377
379 378 # generic mapping between names and nodes
380 379 self.names = namespaces.namespaces()
381 380
382 381 @property
383 382 def wopener(self):
384 383 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
385 384 return self.wvfs
386 385
386 @property
387 def opener(self):
388 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
389 return self.vfs
390
387 391 def close(self):
388 392 self._writecaches()
389 393
390 394 def _loadextensions(self):
391 395 extensions.loadall(self.ui)
392 396
393 397 def _writecaches(self):
394 398 if self._revbranchcache:
395 399 self._revbranchcache.write()
396 400
397 401 def _restrictcapabilities(self, caps):
398 402 if self.ui.configbool('experimental', 'bundle2-advertise', True):
399 403 caps = set(caps)
400 404 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
401 405 caps.add('bundle2=' + urlreq.quote(capsblob))
402 406 return caps
403 407
404 408 def _applyopenerreqs(self):
405 409 self.svfs.options = dict((r, 1) for r in self.requirements
406 410 if r in self.openerreqs)
407 411 # experimental config: format.chunkcachesize
408 412 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
409 413 if chunkcachesize is not None:
410 414 self.svfs.options['chunkcachesize'] = chunkcachesize
411 415 # experimental config: format.maxchainlen
412 416 maxchainlen = self.ui.configint('format', 'maxchainlen')
413 417 if maxchainlen is not None:
414 418 self.svfs.options['maxchainlen'] = maxchainlen
415 419 # experimental config: format.manifestcachesize
416 420 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
417 421 if manifestcachesize is not None:
418 422 self.svfs.options['manifestcachesize'] = manifestcachesize
419 423 # experimental config: format.aggressivemergedeltas
420 424 aggressivemergedeltas = self.ui.configbool('format',
421 425 'aggressivemergedeltas', False)
422 426 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
423 427 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
424 428
425 429 for r in self.requirements:
426 430 if r.startswith('exp-compression-'):
427 431 self.svfs.options['compengine'] = r[len('exp-compression-'):]
428 432
429 433 def _writerequirements(self):
430 434 scmutil.writerequires(self.vfs, self.requirements)
431 435
432 436 def _checknested(self, path):
433 437 """Determine if path is a legal nested repository."""
434 438 if not path.startswith(self.root):
435 439 return False
436 440 subpath = path[len(self.root) + 1:]
437 441 normsubpath = util.pconvert(subpath)
438 442
439 443 # XXX: Checking against the current working copy is wrong in
440 444 # the sense that it can reject things like
441 445 #
442 446 # $ hg cat -r 10 sub/x.txt
443 447 #
444 448 # if sub/ is no longer a subrepository in the working copy
445 449 # parent revision.
446 450 #
447 451 # However, it can of course also allow things that would have
448 452 # been rejected before, such as the above cat command if sub/
449 453 # is a subrepository now, but was a normal directory before.
450 454 # The old path auditor would have rejected by mistake since it
451 455 # panics when it sees sub/.hg/.
452 456 #
453 457 # All in all, checking against the working copy seems sensible
454 458 # since we want to prevent access to nested repositories on
455 459 # the filesystem *now*.
456 460 ctx = self[None]
457 461 parts = util.splitpath(subpath)
458 462 while parts:
459 463 prefix = '/'.join(parts)
460 464 if prefix in ctx.substate:
461 465 if prefix == normsubpath:
462 466 return True
463 467 else:
464 468 sub = ctx.sub(prefix)
465 469 return sub.checknested(subpath[len(prefix) + 1:])
466 470 else:
467 471 parts.pop()
468 472 return False
469 473
470 474 def peer(self):
471 475 return localpeer(self) # not cached to avoid reference cycle
472 476
473 477 def unfiltered(self):
474 478 """Return unfiltered version of the repository
475 479
476 480 Intended to be overwritten by filtered repo."""
477 481 return self
478 482
479 483 def filtered(self, name):
480 484 """Return a filtered version of a repository"""
481 485 # build a new class with the mixin and the current class
482 486 # (possibly subclass of the repo)
483 487 class proxycls(repoview.repoview, self.unfiltered().__class__):
484 488 pass
485 489 return proxycls(self, name)
486 490
487 491 @repofilecache('bookmarks', 'bookmarks.current')
488 492 def _bookmarks(self):
489 493 return bookmarks.bmstore(self)
490 494
491 495 @property
492 496 def _activebookmark(self):
493 497 return self._bookmarks.active
494 498
495 499 def bookmarkheads(self, bookmark):
496 500 name = bookmark.split('@', 1)[0]
497 501 heads = []
498 502 for mark, n in self._bookmarks.iteritems():
499 503 if mark.split('@', 1)[0] == name:
500 504 heads.append(n)
501 505 return heads
502 506
503 507 # _phaserevs and _phasesets depend on changelog. what we need is to
504 508 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
505 509 # can't be easily expressed in filecache mechanism.
506 510 @storecache('phaseroots', '00changelog.i')
507 511 def _phasecache(self):
508 512 return phases.phasecache(self, self._phasedefaults)
509 513
510 514 @storecache('obsstore')
511 515 def obsstore(self):
512 516 # read default format for new obsstore.
513 517 # developer config: format.obsstore-version
514 518 defaultformat = self.ui.configint('format', 'obsstore-version', None)
515 519 # rely on obsstore class default when possible.
516 520 kwargs = {}
517 521 if defaultformat is not None:
518 522 kwargs['defaultformat'] = defaultformat
519 523 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
520 524 store = obsolete.obsstore(self.svfs, readonly=readonly,
521 525 **kwargs)
522 526 if store and readonly:
523 527 self.ui.warn(
524 528 _('obsolete feature not enabled but %i markers found!\n')
525 529 % len(list(store)))
526 530 return store
527 531
528 532 @storecache('00changelog.i')
529 533 def changelog(self):
530 534 c = changelog.changelog(self.svfs)
531 535 if txnutil.mayhavepending(self.root):
532 536 c.readpending('00changelog.i.a')
533 537 return c
534 538
535 539 def _constructmanifest(self):
536 540 # This is a temporary function while we migrate from manifest to
537 541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
538 542 # manifest creation.
539 543 return manifest.manifestrevlog(self.svfs)
540 544
541 545 @storecache('00manifest.i')
542 546 def manifestlog(self):
543 547 return manifest.manifestlog(self.svfs, self)
544 548
545 549 @repofilecache('dirstate')
546 550 def dirstate(self):
547 551 return dirstate.dirstate(self.vfs, self.ui, self.root,
548 552 self._dirstatevalidate)
549 553
550 554 def _dirstatevalidate(self, node):
551 555 try:
552 556 self.changelog.rev(node)
553 557 return node
554 558 except error.LookupError:
555 559 if not self._dirstatevalidatewarned:
556 560 self._dirstatevalidatewarned = True
557 561 self.ui.warn(_("warning: ignoring unknown"
558 562 " working parent %s!\n") % short(node))
559 563 return nullid
560 564
561 565 def __getitem__(self, changeid):
562 566 if changeid is None or changeid == wdirrev:
563 567 return context.workingctx(self)
564 568 if isinstance(changeid, slice):
565 569 return [context.changectx(self, i)
566 570 for i in xrange(*changeid.indices(len(self)))
567 571 if i not in self.changelog.filteredrevs]
568 572 return context.changectx(self, changeid)
569 573
570 574 def __contains__(self, changeid):
571 575 try:
572 576 self[changeid]
573 577 return True
574 578 except error.RepoLookupError:
575 579 return False
576 580
577 581 def __nonzero__(self):
578 582 return True
579 583
580 584 def __len__(self):
581 585 return len(self.changelog)
582 586
583 587 def __iter__(self):
584 588 return iter(self.changelog)
585 589
586 590 def revs(self, expr, *args):
587 591 '''Find revisions matching a revset.
588 592
589 593 The revset is specified as a string ``expr`` that may contain
590 594 %-formatting to escape certain types. See ``revsetlang.formatspec``.
591 595
592 596 Revset aliases from the configuration are not expanded. To expand
593 597 user aliases, consider calling ``scmutil.revrange()`` or
594 598 ``repo.anyrevs([expr], user=True)``.
595 599
596 600 Returns a revset.abstractsmartset, which is a list-like interface
597 601 that contains integer revisions.
598 602 '''
599 603 expr = revsetlang.formatspec(expr, *args)
600 604 m = revset.match(None, expr)
601 605 return m(self)
602 606
603 607 def set(self, expr, *args):
604 608 '''Find revisions matching a revset and emit changectx instances.
605 609
606 610 This is a convenience wrapper around ``revs()`` that iterates the
607 611 result and is a generator of changectx instances.
608 612
609 613 Revset aliases from the configuration are not expanded. To expand
610 614 user aliases, consider calling ``scmutil.revrange()``.
611 615 '''
612 616 for r in self.revs(expr, *args):
613 617 yield self[r]
614 618
615 619 def anyrevs(self, specs, user=False):
616 620 '''Find revisions matching one of the given revsets.
617 621
618 622 Revset aliases from the configuration are not expanded by default. To
619 623 expand user aliases, specify ``user=True``.
620 624 '''
621 625 if user:
622 626 m = revset.matchany(self.ui, specs, repo=self)
623 627 else:
624 628 m = revset.matchany(None, specs)
625 629 return m(self)
626 630
627 631 def url(self):
628 632 return 'file:' + self.root
629 633
630 634 def hook(self, name, throw=False, **args):
631 635 """Call a hook, passing this repo instance.
632 636
633 637 This a convenience method to aid invoking hooks. Extensions likely
634 638 won't call this unless they have registered a custom hook or are
635 639 replacing code that is expected to call a hook.
636 640 """
637 641 return hook.hook(self.ui, self, name, throw, **args)
638 642
639 643 @unfilteredmethod
640 644 def _tag(self, names, node, message, local, user, date, extra=None,
641 645 editor=False):
642 646 if isinstance(names, str):
643 647 names = (names,)
644 648
645 649 branches = self.branchmap()
646 650 for name in names:
647 651 self.hook('pretag', throw=True, node=hex(node), tag=name,
648 652 local=local)
649 653 if name in branches:
650 654 self.ui.warn(_("warning: tag %s conflicts with existing"
651 655 " branch name\n") % name)
652 656
653 657 def writetags(fp, names, munge, prevtags):
654 658 fp.seek(0, 2)
655 659 if prevtags and prevtags[-1] != '\n':
656 660 fp.write('\n')
657 661 for name in names:
658 662 if munge:
659 663 m = munge(name)
660 664 else:
661 665 m = name
662 666
663 667 if (self._tagscache.tagtypes and
664 668 name in self._tagscache.tagtypes):
665 669 old = self.tags().get(name, nullid)
666 670 fp.write('%s %s\n' % (hex(old), m))
667 671 fp.write('%s %s\n' % (hex(node), m))
668 672 fp.close()
669 673
670 674 prevtags = ''
671 675 if local:
672 676 try:
673 677 fp = self.vfs('localtags', 'r+')
674 678 except IOError:
675 679 fp = self.vfs('localtags', 'a')
676 680 else:
677 681 prevtags = fp.read()
678 682
679 683 # local tags are stored in the current charset
680 684 writetags(fp, names, None, prevtags)
681 685 for name in names:
682 686 self.hook('tag', node=hex(node), tag=name, local=local)
683 687 return
684 688
685 689 try:
686 690 fp = self.wfile('.hgtags', 'rb+')
687 691 except IOError as e:
688 692 if e.errno != errno.ENOENT:
689 693 raise
690 694 fp = self.wfile('.hgtags', 'ab')
691 695 else:
692 696 prevtags = fp.read()
693 697
694 698 # committed tags are stored in UTF-8
695 699 writetags(fp, names, encoding.fromlocal, prevtags)
696 700
697 701 fp.close()
698 702
699 703 self.invalidatecaches()
700 704
701 705 if '.hgtags' not in self.dirstate:
702 706 self[None].add(['.hgtags'])
703 707
704 708 m = matchmod.exact(self.root, '', ['.hgtags'])
705 709 tagnode = self.commit(message, user, date, extra=extra, match=m,
706 710 editor=editor)
707 711
708 712 for name in names:
709 713 self.hook('tag', node=hex(node), tag=name, local=local)
710 714
711 715 return tagnode
712 716
713 717 def tag(self, names, node, message, local, user, date, editor=False):
714 718 '''tag a revision with one or more symbolic names.
715 719
716 720 names is a list of strings or, when adding a single tag, names may be a
717 721 string.
718 722
719 723 if local is True, the tags are stored in a per-repository file.
720 724 otherwise, they are stored in the .hgtags file, and a new
721 725 changeset is committed with the change.
722 726
723 727 keyword arguments:
724 728
725 729 local: whether to store tags in non-version-controlled file
726 730 (default False)
727 731
728 732 message: commit message to use if committing
729 733
730 734 user: name of user to use if committing
731 735
732 736 date: date tuple to use if committing'''
733 737
734 738 if not local:
735 739 m = matchmod.exact(self.root, '', ['.hgtags'])
736 740 if any(self.status(match=m, unknown=True, ignored=True)):
737 741 raise error.Abort(_('working copy of .hgtags is changed'),
738 742 hint=_('please commit .hgtags manually'))
739 743
740 744 self.tags() # instantiate the cache
741 745 self._tag(names, node, message, local, user, date, editor=editor)
742 746
743 747 @filteredpropertycache
744 748 def _tagscache(self):
745 749 '''Returns a tagscache object that contains various tags related
746 750 caches.'''
747 751
748 752 # This simplifies its cache management by having one decorated
749 753 # function (this one) and the rest simply fetch things from it.
750 754 class tagscache(object):
751 755 def __init__(self):
752 756 # These two define the set of tags for this repository. tags
753 757 # maps tag name to node; tagtypes maps tag name to 'global' or
754 758 # 'local'. (Global tags are defined by .hgtags across all
755 759 # heads, and local tags are defined in .hg/localtags.)
756 760 # They constitute the in-memory cache of tags.
757 761 self.tags = self.tagtypes = None
758 762
759 763 self.nodetagscache = self.tagslist = None
760 764
761 765 cache = tagscache()
762 766 cache.tags, cache.tagtypes = self._findtags()
763 767
764 768 return cache
765 769
766 770 def tags(self):
767 771 '''return a mapping of tag to node'''
768 772 t = {}
769 773 if self.changelog.filteredrevs:
770 774 tags, tt = self._findtags()
771 775 else:
772 776 tags = self._tagscache.tags
773 777 for k, v in tags.iteritems():
774 778 try:
775 779 # ignore tags to unknown nodes
776 780 self.changelog.rev(v)
777 781 t[k] = v
778 782 except (error.LookupError, ValueError):
779 783 pass
780 784 return t
781 785
782 786 def _findtags(self):
783 787 '''Do the hard work of finding tags. Return a pair of dicts
784 788 (tags, tagtypes) where tags maps tag name to node, and tagtypes
785 789 maps tag name to a string like \'global\' or \'local\'.
786 790 Subclasses or extensions are free to add their own tags, but
787 791 should be aware that the returned dicts will be retained for the
788 792 duration of the localrepo object.'''
789 793
790 794 # XXX what tagtype should subclasses/extensions use? Currently
791 795 # mq and bookmarks add tags, but do not set the tagtype at all.
792 796 # Should each extension invent its own tag type? Should there
793 797 # be one tagtype for all such "virtual" tags? Or is the status
794 798 # quo fine?
795 799
796 800 alltags = {} # map tag name to (node, hist)
797 801 tagtypes = {}
798 802
799 803 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
800 804 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
801 805
802 806 # Build the return dicts. Have to re-encode tag names because
803 807 # the tags module always uses UTF-8 (in order not to lose info
804 808 # writing to the cache), but the rest of Mercurial wants them in
805 809 # local encoding.
806 810 tags = {}
807 811 for (name, (node, hist)) in alltags.iteritems():
808 812 if node != nullid:
809 813 tags[encoding.tolocal(name)] = node
810 814 tags['tip'] = self.changelog.tip()
811 815 tagtypes = dict([(encoding.tolocal(name), value)
812 816 for (name, value) in tagtypes.iteritems()])
813 817 return (tags, tagtypes)
814 818
815 819 def tagtype(self, tagname):
816 820 '''
817 821 return the type of the given tag. result can be:
818 822
819 823 'local' : a local tag
820 824 'global' : a global tag
821 825 None : tag does not exist
822 826 '''
823 827
824 828 return self._tagscache.tagtypes.get(tagname)
825 829
826 830 def tagslist(self):
827 831 '''return a list of tags ordered by revision'''
828 832 if not self._tagscache.tagslist:
829 833 l = []
830 834 for t, n in self.tags().iteritems():
831 835 l.append((self.changelog.rev(n), t, n))
832 836 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
833 837
834 838 return self._tagscache.tagslist
835 839
836 840 def nodetags(self, node):
837 841 '''return the tags associated with a node'''
838 842 if not self._tagscache.nodetagscache:
839 843 nodetagscache = {}
840 844 for t, n in self._tagscache.tags.iteritems():
841 845 nodetagscache.setdefault(n, []).append(t)
842 846 for tags in nodetagscache.itervalues():
843 847 tags.sort()
844 848 self._tagscache.nodetagscache = nodetagscache
845 849 return self._tagscache.nodetagscache.get(node, [])
846 850
847 851 def nodebookmarks(self, node):
848 852 """return the list of bookmarks pointing to the specified node"""
849 853 marks = []
850 854 for bookmark, n in self._bookmarks.iteritems():
851 855 if n == node:
852 856 marks.append(bookmark)
853 857 return sorted(marks)
854 858
855 859 def branchmap(self):
856 860 '''returns a dictionary {branch: [branchheads]} with branchheads
857 861 ordered by increasing revision number'''
858 862 branchmap.updatecache(self)
859 863 return self._branchcaches[self.filtername]
860 864
861 865 @unfilteredmethod
862 866 def revbranchcache(self):
863 867 if not self._revbranchcache:
864 868 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
865 869 return self._revbranchcache
866 870
867 871 def branchtip(self, branch, ignoremissing=False):
868 872 '''return the tip node for a given branch
869 873
870 874 If ignoremissing is True, then this method will not raise an error.
871 875 This is helpful for callers that only expect None for a missing branch
872 876 (e.g. namespace).
873 877
874 878 '''
875 879 try:
876 880 return self.branchmap().branchtip(branch)
877 881 except KeyError:
878 882 if not ignoremissing:
879 883 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
880 884 else:
881 885 pass
882 886
883 887 def lookup(self, key):
884 888 return self[key].node()
885 889
886 890 def lookupbranch(self, key, remote=None):
887 891 repo = remote or self
888 892 if key in repo.branchmap():
889 893 return key
890 894
891 895 repo = (remote and remote.local()) and remote or self
892 896 return repo[key].branch()
893 897
894 898 def known(self, nodes):
895 899 cl = self.changelog
896 900 nm = cl.nodemap
897 901 filtered = cl.filteredrevs
898 902 result = []
899 903 for n in nodes:
900 904 r = nm.get(n)
901 905 resp = not (r is None or r in filtered)
902 906 result.append(resp)
903 907 return result
904 908
905 909 def local(self):
906 910 return self
907 911
908 912 def publishing(self):
909 913 # it's safe (and desirable) to trust the publish flag unconditionally
910 914 # so that we don't finalize changes shared between users via ssh or nfs
911 915 return self.ui.configbool('phases', 'publish', True, untrusted=True)
912 916
913 917 def cancopy(self):
914 918 # so statichttprepo's override of local() works
915 919 if not self.local():
916 920 return False
917 921 if not self.publishing():
918 922 return True
919 923 # if publishing we can't copy if there is filtered content
920 924 return not self.filtered('visible').changelog.filteredrevs
921 925
922 926 def shared(self):
923 927 '''the type of shared repository (None if not shared)'''
924 928 if self.sharedpath != self.path:
925 929 return 'store'
926 930 return None
927 931
928 932 def join(self, f, *insidef):
929 933 return self.vfs.join(os.path.join(f, *insidef))
930 934
931 935 def wjoin(self, f, *insidef):
932 936 return self.vfs.reljoin(self.root, f, *insidef)
933 937
934 938 def file(self, f):
935 939 if f[0] == '/':
936 940 f = f[1:]
937 941 return filelog.filelog(self.svfs, f)
938 942
939 943 def changectx(self, changeid):
940 944 return self[changeid]
941 945
942 946 def setparents(self, p1, p2=nullid):
943 947 self.dirstate.beginparentchange()
944 948 copies = self.dirstate.setparents(p1, p2)
945 949 pctx = self[p1]
946 950 if copies:
947 951 # Adjust copy records, the dirstate cannot do it, it
948 952 # requires access to parents manifests. Preserve them
949 953 # only for entries added to first parent.
950 954 for f in copies:
951 955 if f not in pctx and copies[f] in pctx:
952 956 self.dirstate.copy(copies[f], f)
953 957 if p2 == nullid:
954 958 for f, s in sorted(self.dirstate.copies().items()):
955 959 if f not in pctx and s not in pctx:
956 960 self.dirstate.copy(None, f)
957 961 self.dirstate.endparentchange()
958 962
959 963 def filectx(self, path, changeid=None, fileid=None):
960 964 """changeid can be a changeset revision, node, or tag.
961 965 fileid can be a file revision or node."""
962 966 return context.filectx(self, path, changeid, fileid)
963 967
964 968 def getcwd(self):
965 969 return self.dirstate.getcwd()
966 970
967 971 def pathto(self, f, cwd=None):
968 972 return self.dirstate.pathto(f, cwd)
969 973
970 974 def wfile(self, f, mode='r'):
971 975 return self.wvfs(f, mode)
972 976
973 977 def _link(self, f):
974 978 return self.wvfs.islink(f)
975 979
976 980 def _loadfilter(self, filter):
977 981 if filter not in self.filterpats:
978 982 l = []
979 983 for pat, cmd in self.ui.configitems(filter):
980 984 if cmd == '!':
981 985 continue
982 986 mf = matchmod.match(self.root, '', [pat])
983 987 fn = None
984 988 params = cmd
985 989 for name, filterfn in self._datafilters.iteritems():
986 990 if cmd.startswith(name):
987 991 fn = filterfn
988 992 params = cmd[len(name):].lstrip()
989 993 break
990 994 if not fn:
991 995 fn = lambda s, c, **kwargs: util.filter(s, c)
992 996 # Wrap old filters not supporting keyword arguments
993 997 if not inspect.getargspec(fn)[2]:
994 998 oldfn = fn
995 999 fn = lambda s, c, **kwargs: oldfn(s, c)
996 1000 l.append((mf, fn, params))
997 1001 self.filterpats[filter] = l
998 1002 return self.filterpats[filter]
999 1003
1000 1004 def _filter(self, filterpats, filename, data):
1001 1005 for mf, fn, cmd in filterpats:
1002 1006 if mf(filename):
1003 1007 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1004 1008 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1005 1009 break
1006 1010
1007 1011 return data
1008 1012
1009 1013 @unfilteredpropertycache
1010 1014 def _encodefilterpats(self):
1011 1015 return self._loadfilter('encode')
1012 1016
1013 1017 @unfilteredpropertycache
1014 1018 def _decodefilterpats(self):
1015 1019 return self._loadfilter('decode')
1016 1020
1017 1021 def adddatafilter(self, name, filter):
1018 1022 self._datafilters[name] = filter
1019 1023
1020 1024 def wread(self, filename):
1021 1025 if self._link(filename):
1022 1026 data = self.wvfs.readlink(filename)
1023 1027 else:
1024 1028 data = self.wvfs.read(filename)
1025 1029 return self._filter(self._encodefilterpats, filename, data)
1026 1030
1027 1031 def wwrite(self, filename, data, flags, backgroundclose=False):
1028 1032 """write ``data`` into ``filename`` in the working directory
1029 1033
1030 1034 This returns the length of the written (maybe decoded) data.
1031 1035 """
1032 1036 data = self._filter(self._decodefilterpats, filename, data)
1033 1037 if 'l' in flags:
1034 1038 self.wvfs.symlink(data, filename)
1035 1039 else:
1036 1040 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1037 1041 if 'x' in flags:
1038 1042 self.wvfs.setflags(filename, False, True)
1039 1043 return len(data)
1040 1044
1041 1045 def wwritedata(self, filename, data):
1042 1046 return self._filter(self._decodefilterpats, filename, data)
1043 1047
1044 1048 def currenttransaction(self):
1045 1049 """return the current transaction or None if non exists"""
1046 1050 if self._transref:
1047 1051 tr = self._transref()
1048 1052 else:
1049 1053 tr = None
1050 1054
1051 1055 if tr and tr.running():
1052 1056 return tr
1053 1057 return None
1054 1058
1055 1059 def transaction(self, desc, report=None):
1056 1060 if (self.ui.configbool('devel', 'all-warnings')
1057 1061 or self.ui.configbool('devel', 'check-locks')):
1058 1062 if self._currentlock(self._lockref) is None:
1059 1063 raise error.ProgrammingError('transaction requires locking')
1060 1064 tr = self.currenttransaction()
1061 1065 if tr is not None:
1062 1066 return tr.nest()
1063 1067
1064 1068 # abort here if the journal already exists
1065 1069 if self.svfs.exists("journal"):
1066 1070 raise error.RepoError(
1067 1071 _("abandoned transaction found"),
1068 1072 hint=_("run 'hg recover' to clean up transaction"))
1069 1073
1070 1074 idbase = "%.40f#%f" % (random.random(), time.time())
1071 1075 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1072 1076 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1073 1077
1074 1078 self._writejournal(desc)
1075 1079 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1076 1080 if report:
1077 1081 rp = report
1078 1082 else:
1079 1083 rp = self.ui.warn
1080 1084 vfsmap = {'plain': self.vfs} # root of .hg/
1081 1085 # we must avoid cyclic reference between repo and transaction.
1082 1086 reporef = weakref.ref(self)
1083 1087 def validate(tr):
1084 1088 """will run pre-closing hooks"""
1085 1089 reporef().hook('pretxnclose', throw=True,
1086 1090 txnname=desc, **tr.hookargs)
1087 1091 def releasefn(tr, success):
1088 1092 repo = reporef()
1089 1093 if success:
1090 1094 # this should be explicitly invoked here, because
1091 1095 # in-memory changes aren't written out at closing
1092 1096 # transaction, if tr.addfilegenerator (via
1093 1097 # dirstate.write or so) isn't invoked while
1094 1098 # transaction running
1095 1099 repo.dirstate.write(None)
1096 1100 else:
1097 1101 # discard all changes (including ones already written
1098 1102 # out) in this transaction
1099 1103 repo.dirstate.restorebackup(None, prefix='journal.')
1100 1104
1101 1105 repo.invalidate(clearfilecache=True)
1102 1106
1103 1107 tr = transaction.transaction(rp, self.svfs, vfsmap,
1104 1108 "journal",
1105 1109 "undo",
1106 1110 aftertrans(renames),
1107 1111 self.store.createmode,
1108 1112 validator=validate,
1109 1113 releasefn=releasefn)
1110 1114
1111 1115 tr.hookargs['txnid'] = txnid
1112 1116 # note: writing the fncache only during finalize means that the file is
1113 1117 # outdated when running hooks. As fncache is used for streaming clone,
1114 1118 # this is not expected to break anything that happens during the hooks.
1115 1119 tr.addfinalize('flush-fncache', self.store.write)
1116 1120 def txnclosehook(tr2):
1117 1121 """To be run if transaction is successful, will schedule a hook run
1118 1122 """
1119 1123 # Don't reference tr2 in hook() so we don't hold a reference.
1120 1124 # This reduces memory consumption when there are multiple
1121 1125 # transactions per lock. This can likely go away if issue5045
1122 1126 # fixes the function accumulation.
1123 1127 hookargs = tr2.hookargs
1124 1128
1125 1129 def hook():
1126 1130 reporef().hook('txnclose', throw=False, txnname=desc,
1127 1131 **hookargs)
1128 1132 reporef()._afterlock(hook)
1129 1133 tr.addfinalize('txnclose-hook', txnclosehook)
1130 1134 def txnaborthook(tr2):
1131 1135 """To be run if transaction is aborted
1132 1136 """
1133 1137 reporef().hook('txnabort', throw=False, txnname=desc,
1134 1138 **tr2.hookargs)
1135 1139 tr.addabort('txnabort-hook', txnaborthook)
1136 1140 # avoid eager cache invalidation. in-memory data should be identical
1137 1141 # to stored data if transaction has no error.
1138 1142 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1139 1143 self._transref = weakref.ref(tr)
1140 1144 return tr
1141 1145
1142 1146 def _journalfiles(self):
1143 1147 return ((self.svfs, 'journal'),
1144 1148 (self.vfs, 'journal.dirstate'),
1145 1149 (self.vfs, 'journal.branch'),
1146 1150 (self.vfs, 'journal.desc'),
1147 1151 (self.vfs, 'journal.bookmarks'),
1148 1152 (self.svfs, 'journal.phaseroots'))
1149 1153
1150 1154 def undofiles(self):
1151 1155 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1152 1156
1153 1157 def _writejournal(self, desc):
1154 1158 self.dirstate.savebackup(None, prefix='journal.')
1155 1159 self.vfs.write("journal.branch",
1156 1160 encoding.fromlocal(self.dirstate.branch()))
1157 1161 self.vfs.write("journal.desc",
1158 1162 "%d\n%s\n" % (len(self), desc))
1159 1163 self.vfs.write("journal.bookmarks",
1160 1164 self.vfs.tryread("bookmarks"))
1161 1165 self.svfs.write("journal.phaseroots",
1162 1166 self.svfs.tryread("phaseroots"))
1163 1167
1164 1168 def recover(self):
1165 1169 with self.lock():
1166 1170 if self.svfs.exists("journal"):
1167 1171 self.ui.status(_("rolling back interrupted transaction\n"))
1168 1172 vfsmap = {'': self.svfs,
1169 1173 'plain': self.vfs,}
1170 1174 transaction.rollback(self.svfs, vfsmap, "journal",
1171 1175 self.ui.warn)
1172 1176 self.invalidate()
1173 1177 return True
1174 1178 else:
1175 1179 self.ui.warn(_("no interrupted transaction available\n"))
1176 1180 return False
1177 1181
1178 1182 def rollback(self, dryrun=False, force=False):
1179 1183 wlock = lock = dsguard = None
1180 1184 try:
1181 1185 wlock = self.wlock()
1182 1186 lock = self.lock()
1183 1187 if self.svfs.exists("undo"):
1184 1188 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1185 1189
1186 1190 return self._rollback(dryrun, force, dsguard)
1187 1191 else:
1188 1192 self.ui.warn(_("no rollback information available\n"))
1189 1193 return 1
1190 1194 finally:
1191 1195 release(dsguard, lock, wlock)
1192 1196
1193 1197 @unfilteredmethod # Until we get smarter cache management
1194 1198 def _rollback(self, dryrun, force, dsguard):
1195 1199 ui = self.ui
1196 1200 try:
1197 1201 args = self.vfs.read('undo.desc').splitlines()
1198 1202 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1199 1203 if len(args) >= 3:
1200 1204 detail = args[2]
1201 1205 oldtip = oldlen - 1
1202 1206
1203 1207 if detail and ui.verbose:
1204 1208 msg = (_('repository tip rolled back to revision %s'
1205 1209 ' (undo %s: %s)\n')
1206 1210 % (oldtip, desc, detail))
1207 1211 else:
1208 1212 msg = (_('repository tip rolled back to revision %s'
1209 1213 ' (undo %s)\n')
1210 1214 % (oldtip, desc))
1211 1215 except IOError:
1212 1216 msg = _('rolling back unknown transaction\n')
1213 1217 desc = None
1214 1218
1215 1219 if not force and self['.'] != self['tip'] and desc == 'commit':
1216 1220 raise error.Abort(
1217 1221 _('rollback of last commit while not checked out '
1218 1222 'may lose data'), hint=_('use -f to force'))
1219 1223
1220 1224 ui.status(msg)
1221 1225 if dryrun:
1222 1226 return 0
1223 1227
1224 1228 parents = self.dirstate.parents()
1225 1229 self.destroying()
1226 1230 vfsmap = {'plain': self.vfs, '': self.svfs}
1227 1231 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1228 1232 if self.vfs.exists('undo.bookmarks'):
1229 1233 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1230 1234 if self.svfs.exists('undo.phaseroots'):
1231 1235 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1232 1236 self.invalidate()
1233 1237
1234 1238 parentgone = (parents[0] not in self.changelog.nodemap or
1235 1239 parents[1] not in self.changelog.nodemap)
1236 1240 if parentgone:
1237 1241 # prevent dirstateguard from overwriting already restored one
1238 1242 dsguard.close()
1239 1243
1240 1244 self.dirstate.restorebackup(None, prefix='undo.')
1241 1245 try:
1242 1246 branch = self.vfs.read('undo.branch')
1243 1247 self.dirstate.setbranch(encoding.tolocal(branch))
1244 1248 except IOError:
1245 1249 ui.warn(_('named branch could not be reset: '
1246 1250 'current branch is still \'%s\'\n')
1247 1251 % self.dirstate.branch())
1248 1252
1249 1253 parents = tuple([p.rev() for p in self[None].parents()])
1250 1254 if len(parents) > 1:
1251 1255 ui.status(_('working directory now based on '
1252 1256 'revisions %d and %d\n') % parents)
1253 1257 else:
1254 1258 ui.status(_('working directory now based on '
1255 1259 'revision %d\n') % parents)
1256 1260 mergemod.mergestate.clean(self, self['.'].node())
1257 1261
1258 1262 # TODO: if we know which new heads may result from this rollback, pass
1259 1263 # them to destroy(), which will prevent the branchhead cache from being
1260 1264 # invalidated.
1261 1265 self.destroyed()
1262 1266 return 0
1263 1267
1264 1268 def invalidatecaches(self):
1265 1269
1266 1270 if '_tagscache' in vars(self):
1267 1271 # can't use delattr on proxy
1268 1272 del self.__dict__['_tagscache']
1269 1273
1270 1274 self.unfiltered()._branchcaches.clear()
1271 1275 self.invalidatevolatilesets()
1272 1276
1273 1277 def invalidatevolatilesets(self):
1274 1278 self.filteredrevcache.clear()
1275 1279 obsolete.clearobscaches(self)
1276 1280
1277 1281 def invalidatedirstate(self):
1278 1282 '''Invalidates the dirstate, causing the next call to dirstate
1279 1283 to check if it was modified since the last time it was read,
1280 1284 rereading it if it has.
1281 1285
1282 1286 This is different from dirstate.invalidate() in that it doesn't always
1283 1287 reread the dirstate. Use dirstate.invalidate() if you want to
1284 1288 explicitly read the dirstate again (i.e. restoring it to a previous
1285 1289 known good state).'''
1286 1290 if hasunfilteredcache(self, 'dirstate'):
1287 1291 for k in self.dirstate._filecache:
1288 1292 try:
1289 1293 delattr(self.dirstate, k)
1290 1294 except AttributeError:
1291 1295 pass
1292 1296 delattr(self.unfiltered(), 'dirstate')
1293 1297
1294 1298 def invalidate(self, clearfilecache=False):
1295 1299 '''Invalidates both store and non-store parts other than dirstate
1296 1300
1297 1301 If a transaction is running, invalidation of store is omitted,
1298 1302 because discarding in-memory changes might cause inconsistency
1299 1303 (e.g. incomplete fncache causes unintentional failure, but
1300 1304 redundant one doesn't).
1301 1305 '''
1302 1306 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1303 1307 for k in self._filecache.keys():
1304 1308 # dirstate is invalidated separately in invalidatedirstate()
1305 1309 if k == 'dirstate':
1306 1310 continue
1307 1311
1308 1312 if clearfilecache:
1309 1313 del self._filecache[k]
1310 1314 try:
1311 1315 delattr(unfiltered, k)
1312 1316 except AttributeError:
1313 1317 pass
1314 1318 self.invalidatecaches()
1315 1319 if not self.currenttransaction():
1316 1320 # TODO: Changing contents of store outside transaction
1317 1321 # causes inconsistency. We should make in-memory store
1318 1322 # changes detectable, and abort if changed.
1319 1323 self.store.invalidatecaches()
1320 1324
1321 1325 def invalidateall(self):
1322 1326 '''Fully invalidates both store and non-store parts, causing the
1323 1327 subsequent operation to reread any outside changes.'''
1324 1328 # extension should hook this to invalidate its caches
1325 1329 self.invalidate()
1326 1330 self.invalidatedirstate()
1327 1331
1328 1332 @unfilteredmethod
1329 1333 def _refreshfilecachestats(self, tr):
1330 1334 """Reload stats of cached files so that they are flagged as valid"""
1331 1335 for k, ce in self._filecache.items():
1332 1336 if k == 'dirstate' or k not in self.__dict__:
1333 1337 continue
1334 1338 ce.refresh()
1335 1339
1336 1340 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1337 1341 inheritchecker=None, parentenvvar=None):
1338 1342 parentlock = None
1339 1343 # the contents of parentenvvar are used by the underlying lock to
1340 1344 # determine whether it can be inherited
1341 1345 if parentenvvar is not None:
1342 1346 parentlock = encoding.environ.get(parentenvvar)
1343 1347 try:
1344 1348 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1345 1349 acquirefn=acquirefn, desc=desc,
1346 1350 inheritchecker=inheritchecker,
1347 1351 parentlock=parentlock)
1348 1352 except error.LockHeld as inst:
1349 1353 if not wait:
1350 1354 raise
1351 1355 # show more details for new-style locks
1352 1356 if ':' in inst.locker:
1353 1357 host, pid = inst.locker.split(":", 1)
1354 1358 self.ui.warn(
1355 1359 _("waiting for lock on %s held by process %r "
1356 1360 "on host %r\n") % (desc, pid, host))
1357 1361 else:
1358 1362 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1359 1363 (desc, inst.locker))
1360 1364 # default to 600 seconds timeout
1361 1365 l = lockmod.lock(vfs, lockname,
1362 1366 int(self.ui.config("ui", "timeout", "600")),
1363 1367 releasefn=releasefn, acquirefn=acquirefn,
1364 1368 desc=desc)
1365 1369 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1366 1370 return l
1367 1371
1368 1372 def _afterlock(self, callback):
1369 1373 """add a callback to be run when the repository is fully unlocked
1370 1374
1371 1375 The callback will be executed when the outermost lock is released
1372 1376 (with wlock being higher level than 'lock')."""
1373 1377 for ref in (self._wlockref, self._lockref):
1374 1378 l = ref and ref()
1375 1379 if l and l.held:
1376 1380 l.postrelease.append(callback)
1377 1381 break
1378 1382 else: # no lock has been found.
1379 1383 callback()
1380 1384
1381 1385 def lock(self, wait=True):
1382 1386 '''Lock the repository store (.hg/store) and return a weak reference
1383 1387 to the lock. Use this before modifying the store (e.g. committing or
1384 1388 stripping). If you are opening a transaction, get a lock as well.
1385 1389
1386 1390 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1387 1391 'wlock' first to avoid a dead-lock hazard.'''
1388 1392 l = self._currentlock(self._lockref)
1389 1393 if l is not None:
1390 1394 l.lock()
1391 1395 return l
1392 1396
1393 1397 l = self._lock(self.svfs, "lock", wait, None,
1394 1398 self.invalidate, _('repository %s') % self.origroot)
1395 1399 self._lockref = weakref.ref(l)
1396 1400 return l
1397 1401
1398 1402 def _wlockchecktransaction(self):
1399 1403 if self.currenttransaction() is not None:
1400 1404 raise error.LockInheritanceContractViolation(
1401 1405 'wlock cannot be inherited in the middle of a transaction')
1402 1406
1403 1407 def wlock(self, wait=True):
1404 1408 '''Lock the non-store parts of the repository (everything under
1405 1409 .hg except .hg/store) and return a weak reference to the lock.
1406 1410
1407 1411 Use this before modifying files in .hg.
1408 1412
1409 1413 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1410 1414 'wlock' first to avoid a dead-lock hazard.'''
1411 1415 l = self._wlockref and self._wlockref()
1412 1416 if l is not None and l.held:
1413 1417 l.lock()
1414 1418 return l
1415 1419
1416 1420 # We do not need to check for non-waiting lock acquisition. Such
1417 1421 # acquisition would not cause dead-lock as it would just fail.
1418 1422 if wait and (self.ui.configbool('devel', 'all-warnings')
1419 1423 or self.ui.configbool('devel', 'check-locks')):
1420 1424 if self._currentlock(self._lockref) is not None:
1421 1425 self.ui.develwarn('"wlock" acquired after "lock"')
1422 1426
1423 1427 def unlock():
1424 1428 if self.dirstate.pendingparentchange():
1425 1429 self.dirstate.invalidate()
1426 1430 else:
1427 1431 self.dirstate.write(None)
1428 1432
1429 1433 self._filecache['dirstate'].refresh()
1430 1434
1431 1435 l = self._lock(self.vfs, "wlock", wait, unlock,
1432 1436 self.invalidatedirstate, _('working directory of %s') %
1433 1437 self.origroot,
1434 1438 inheritchecker=self._wlockchecktransaction,
1435 1439 parentenvvar='HG_WLOCK_LOCKER')
1436 1440 self._wlockref = weakref.ref(l)
1437 1441 return l
1438 1442
1439 1443 def _currentlock(self, lockref):
1440 1444 """Returns the lock if it's held, or None if it's not."""
1441 1445 if lockref is None:
1442 1446 return None
1443 1447 l = lockref()
1444 1448 if l is None or not l.held:
1445 1449 return None
1446 1450 return l
1447 1451
1448 1452 def currentwlock(self):
1449 1453 """Returns the wlock if it's held, or None if it's not."""
1450 1454 return self._currentlock(self._wlockref)
1451 1455
1452 1456 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1453 1457 """
1454 1458 commit an individual file as part of a larger transaction
1455 1459 """
1456 1460
1457 1461 fname = fctx.path()
1458 1462 fparent1 = manifest1.get(fname, nullid)
1459 1463 fparent2 = manifest2.get(fname, nullid)
1460 1464 if isinstance(fctx, context.filectx):
1461 1465 node = fctx.filenode()
1462 1466 if node in [fparent1, fparent2]:
1463 1467 self.ui.debug('reusing %s filelog entry\n' % fname)
1464 1468 if manifest1.flags(fname) != fctx.flags():
1465 1469 changelist.append(fname)
1466 1470 return node
1467 1471
1468 1472 flog = self.file(fname)
1469 1473 meta = {}
1470 1474 copy = fctx.renamed()
1471 1475 if copy and copy[0] != fname:
1472 1476 # Mark the new revision of this file as a copy of another
1473 1477 # file. This copy data will effectively act as a parent
1474 1478 # of this new revision. If this is a merge, the first
1475 1479 # parent will be the nullid (meaning "look up the copy data")
1476 1480 # and the second one will be the other parent. For example:
1477 1481 #
1478 1482 # 0 --- 1 --- 3 rev1 changes file foo
1479 1483 # \ / rev2 renames foo to bar and changes it
1480 1484 # \- 2 -/ rev3 should have bar with all changes and
1481 1485 # should record that bar descends from
1482 1486 # bar in rev2 and foo in rev1
1483 1487 #
1484 1488 # this allows this merge to succeed:
1485 1489 #
1486 1490 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1487 1491 # \ / merging rev3 and rev4 should use bar@rev2
1488 1492 # \- 2 --- 4 as the merge base
1489 1493 #
1490 1494
1491 1495 cfname = copy[0]
1492 1496 crev = manifest1.get(cfname)
1493 1497 newfparent = fparent2
1494 1498
1495 1499 if manifest2: # branch merge
1496 1500 if fparent2 == nullid or crev is None: # copied on remote side
1497 1501 if cfname in manifest2:
1498 1502 crev = manifest2[cfname]
1499 1503 newfparent = fparent1
1500 1504
1501 1505 # Here, we used to search backwards through history to try to find
1502 1506 # where the file copy came from if the source of a copy was not in
1503 1507 # the parent directory. However, this doesn't actually make sense to
1504 1508 # do (what does a copy from something not in your working copy even
1505 1509 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1506 1510 # the user that copy information was dropped, so if they didn't
1507 1511 # expect this outcome it can be fixed, but this is the correct
1508 1512 # behavior in this circumstance.
1509 1513
1510 1514 if crev:
1511 1515 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1512 1516 meta["copy"] = cfname
1513 1517 meta["copyrev"] = hex(crev)
1514 1518 fparent1, fparent2 = nullid, newfparent
1515 1519 else:
1516 1520 self.ui.warn(_("warning: can't find ancestor for '%s' "
1517 1521 "copied from '%s'!\n") % (fname, cfname))
1518 1522
1519 1523 elif fparent1 == nullid:
1520 1524 fparent1, fparent2 = fparent2, nullid
1521 1525 elif fparent2 != nullid:
1522 1526 # is one parent an ancestor of the other?
1523 1527 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1524 1528 if fparent1 in fparentancestors:
1525 1529 fparent1, fparent2 = fparent2, nullid
1526 1530 elif fparent2 in fparentancestors:
1527 1531 fparent2 = nullid
1528 1532
1529 1533 # is the file changed?
1530 1534 text = fctx.data()
1531 1535 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1532 1536 changelist.append(fname)
1533 1537 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1534 1538 # are just the flags changed during merge?
1535 1539 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1536 1540 changelist.append(fname)
1537 1541
1538 1542 return fparent1
1539 1543
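A note on the copy bookkeeping above: rename information travels as filelog metadata, not as an extra parent. A minimal sketch of what `_filecommit` records for `hg mv foo bar` (the keys are the real ones used above; the hash value is a made-up placeholder):

```python
# Editor's sketch of the metadata attached to the new filelog revision.
meta = {
    'copy': 'foo',        # path the file was copied/renamed from
    'copyrev': '0' * 40,  # hex filelog node of the source (placeholder)
}
# With copy metadata present, fparent1 is reset to nullid, telling
# filelog readers to consult meta['copy']/meta['copyrev'] instead of a
# linear parent (see the rev1..rev4 diagrams above).
```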
1540 1544 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1541 1545 """check for commit arguments that aren't committable"""
1542 1546 if match.isexact() or match.prefix():
1543 1547 matched = set(status.modified + status.added + status.removed)
1544 1548
1545 1549 for f in match.files():
1546 1550 f = self.dirstate.normalize(f)
1547 1551 if f == '.' or f in matched or f in wctx.substate:
1548 1552 continue
1549 1553 if f in status.deleted:
1550 1554 fail(f, _('file not found!'))
1551 1555 if f in vdirs: # visited directory
1552 1556 d = f + '/'
1553 1557 for mf in matched:
1554 1558 if mf.startswith(d):
1555 1559 break
1556 1560 else:
1557 1561 fail(f, _("no match under directory!"))
1558 1562 elif f not in self.dirstate:
1559 1563 fail(f, _("file not tracked!"))
1560 1564
1561 1565 @unfilteredmethod
1562 1566 def commit(self, text="", user=None, date=None, match=None, force=False,
1563 1567 editor=False, extra=None):
1564 1568 """Add a new revision to current repository.
1565 1569
1566 1570 Revision information is gathered from the working directory,
1567 1571 match can be used to filter the committed files. If editor is
1568 1572 supplied, it is called to get a commit message.
1569 1573 """
1570 1574 if extra is None:
1571 1575 extra = {}
1572 1576
1573 1577 def fail(f, msg):
1574 1578 raise error.Abort('%s: %s' % (f, msg))
1575 1579
1576 1580 if not match:
1577 1581 match = matchmod.always(self.root, '')
1578 1582
1579 1583 if not force:
1580 1584 vdirs = []
1581 1585 match.explicitdir = vdirs.append
1582 1586 match.bad = fail
1583 1587
1584 1588 wlock = lock = tr = None
1585 1589 try:
1586 1590 wlock = self.wlock()
1587 1591 lock = self.lock() # for recent changelog (see issue4368)
1588 1592
1589 1593 wctx = self[None]
1590 1594 merge = len(wctx.parents()) > 1
1591 1595
1592 1596 if not force and merge and match.ispartial():
1593 1597 raise error.Abort(_('cannot partially commit a merge '
1594 1598 '(do not specify files or patterns)'))
1595 1599
1596 1600 status = self.status(match=match, clean=force)
1597 1601 if force:
1598 1602 status.modified.extend(status.clean) # mq may commit clean files
1599 1603
1600 1604 # check subrepos
1601 1605 subs = []
1602 1606 commitsubs = set()
1603 1607 newstate = wctx.substate.copy()
1604 1608 # only manage subrepos and .hgsubstate if .hgsub is present
1605 1609 if '.hgsub' in wctx:
1606 1610 # we'll decide whether to track this ourselves, thanks
1607 1611 for c in status.modified, status.added, status.removed:
1608 1612 if '.hgsubstate' in c:
1609 1613 c.remove('.hgsubstate')
1610 1614
1611 1615 # compare current state to last committed state
1612 1616 # build new substate based on last committed state
1613 1617 oldstate = wctx.p1().substate
1614 1618 for s in sorted(newstate.keys()):
1615 1619 if not match(s):
1616 1620 # ignore working copy, use old state if present
1617 1621 if s in oldstate:
1618 1622 newstate[s] = oldstate[s]
1619 1623 continue
1620 1624 if not force:
1621 1625 raise error.Abort(
1622 1626 _("commit with new subrepo %s excluded") % s)
1623 1627 dirtyreason = wctx.sub(s).dirtyreason(True)
1624 1628 if dirtyreason:
1625 1629 if not self.ui.configbool('ui', 'commitsubrepos'):
1626 1630 raise error.Abort(dirtyreason,
1627 1631 hint=_("use --subrepos for recursive commit"))
1628 1632 subs.append(s)
1629 1633 commitsubs.add(s)
1630 1634 else:
1631 1635 bs = wctx.sub(s).basestate()
1632 1636 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1633 1637 if oldstate.get(s, (None, None, None))[1] != bs:
1634 1638 subs.append(s)
1635 1639
1636 1640 # check for removed subrepos
1637 1641 for p in wctx.parents():
1638 1642 r = [s for s in p.substate if s not in newstate]
1639 1643 subs += [s for s in r if match(s)]
1640 1644 if subs:
1641 1645 if (not match('.hgsub') and
1642 1646 '.hgsub' in (wctx.modified() + wctx.added())):
1643 1647 raise error.Abort(
1644 1648 _("can't commit subrepos without .hgsub"))
1645 1649 status.modified.insert(0, '.hgsubstate')
1646 1650
1647 1651 elif '.hgsub' in status.removed:
1648 1652 # clean up .hgsubstate when .hgsub is removed
1649 1653 if ('.hgsubstate' in wctx and
1650 1654 '.hgsubstate' not in (status.modified + status.added +
1651 1655 status.removed)):
1652 1656 status.removed.insert(0, '.hgsubstate')
1653 1657
1654 1658 # make sure all explicit patterns are matched
1655 1659 if not force:
1656 1660 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1657 1661
1658 1662 cctx = context.workingcommitctx(self, status,
1659 1663 text, user, date, extra)
1660 1664
1661 1665 # internal config: ui.allowemptycommit
1662 1666 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1663 1667 or extra.get('close') or merge or cctx.files()
1664 1668 or self.ui.configbool('ui', 'allowemptycommit'))
1665 1669 if not allowemptycommit:
1666 1670 return None
1667 1671
1668 1672 if merge and cctx.deleted():
1669 1673 raise error.Abort(_("cannot commit merge with missing files"))
1670 1674
1671 1675 ms = mergemod.mergestate.read(self)
1672 1676 mergeutil.checkunresolved(ms)
1673 1677
1674 1678 if editor:
1675 1679 cctx._text = editor(self, cctx, subs)
1676 1680 edited = (text != cctx._text)
1677 1681
1678 1682 # Save commit message in case this transaction gets rolled back
1679 1683 # (e.g. by a pretxncommit hook). Leave the content alone on
1680 1684 # the assumption that the user will use the same editor again.
1681 1685 msgfn = self.savecommitmessage(cctx._text)
1682 1686
1683 1687 # commit subs and write new state
1684 1688 if subs:
1685 1689 for s in sorted(commitsubs):
1686 1690 sub = wctx.sub(s)
1687 1691 self.ui.status(_('committing subrepository %s\n') %
1688 1692 subrepo.subrelpath(sub))
1689 1693 sr = sub.commit(cctx._text, user, date)
1690 1694 newstate[s] = (newstate[s][0], sr)
1691 1695 subrepo.writestate(self, newstate)
1692 1696
1693 1697 p1, p2 = self.dirstate.parents()
1694 1698 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1695 1699 try:
1696 1700 self.hook("precommit", throw=True, parent1=hookp1,
1697 1701 parent2=hookp2)
1698 1702 tr = self.transaction('commit')
1699 1703 ret = self.commitctx(cctx, True)
1700 1704 except: # re-raises
1701 1705 if edited:
1702 1706 self.ui.write(
1703 1707 _('note: commit message saved in %s\n') % msgfn)
1704 1708 raise
1705 1709 # update bookmarks, dirstate and mergestate
1706 1710 bookmarks.update(self, [p1, p2], ret)
1707 1711 cctx.markcommitted(ret)
1708 1712 ms.reset()
1709 1713 tr.close()
1710 1714
1711 1715 finally:
1712 1716 lockmod.release(tr, lock, wlock)
1713 1717
1714 1718 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1715 1719 # hack for commands that use a temporary commit (eg: histedit)
1716 1720 # the temporary commit may already be stripped when the hook runs
1717 1721 if self.changelog.hasnode(ret):
1718 1722 self.hook("commit", node=node, parent1=parent1,
1719 1723 parent2=parent2)
1720 1724 self._afterlock(commithook)
1721 1725 return ret
1722 1726
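`commit()` above is the same entry point `hg commit` reaches; a hedged sketch of driving it from Python (the repository path and user string are invented, and `ui.ui()` may need to be `ui.load()` on newer Mercurial):

```python
from mercurial import hg, ui as uimod
from mercurial.node import hex

repo = hg.repository(uimod.ui(), '/path/to/repo')  # hypothetical path
node = repo.commit(text='example commit',
                   user='Example <user@example.com>')
# commit() returns None when there is nothing to commit and
# ui.allowemptycommit is unset; otherwise it returns the new node.
if node is not None:
    print(hex(node))
```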
1723 1727 @unfilteredmethod
1724 1728 def commitctx(self, ctx, error=False):
1725 1729 """Add a new revision to current repository.
1726 1730 Revision information is passed via the context argument.
1727 1731 """
1728 1732
1729 1733 tr = None
1730 1734 p1, p2 = ctx.p1(), ctx.p2()
1731 1735 user = ctx.user()
1732 1736
1733 1737 lock = self.lock()
1734 1738 try:
1735 1739 tr = self.transaction("commit")
1736 1740 trp = weakref.proxy(tr)
1737 1741
1738 1742 if ctx.manifestnode():
1739 1743 # reuse an existing manifest revision
1740 1744 mn = ctx.manifestnode()
1741 1745 files = ctx.files()
1742 1746 elif ctx.files():
1743 1747 m1ctx = p1.manifestctx()
1744 1748 m2ctx = p2.manifestctx()
1745 1749 mctx = m1ctx.copy()
1746 1750
1747 1751 m = mctx.read()
1748 1752 m1 = m1ctx.read()
1749 1753 m2 = m2ctx.read()
1750 1754
1751 1755 # check in files
1752 1756 added = []
1753 1757 changed = []
1754 1758 removed = list(ctx.removed())
1755 1759 linkrev = len(self)
1756 1760 self.ui.note(_("committing files:\n"))
1757 1761 for f in sorted(ctx.modified() + ctx.added()):
1758 1762 self.ui.note(f + "\n")
1759 1763 try:
1760 1764 fctx = ctx[f]
1761 1765 if fctx is None:
1762 1766 removed.append(f)
1763 1767 else:
1764 1768 added.append(f)
1765 1769 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1766 1770 trp, changed)
1767 1771 m.setflag(f, fctx.flags())
1768 1772 except OSError as inst:
1769 1773 self.ui.warn(_("trouble committing %s!\n") % f)
1770 1774 raise
1771 1775 except IOError as inst:
1772 1776 errcode = getattr(inst, 'errno', errno.ENOENT)
1773 1777 if error or errcode and errcode != errno.ENOENT:
1774 1778 self.ui.warn(_("trouble committing %s!\n") % f)
1775 1779 raise
1776 1780
1777 1781 # update manifest
1778 1782 self.ui.note(_("committing manifest\n"))
1779 1783 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1780 1784 drop = [f for f in removed if f in m]
1781 1785 for f in drop:
1782 1786 del m[f]
1783 1787 mn = mctx.write(trp, linkrev,
1784 1788 p1.manifestnode(), p2.manifestnode(),
1785 1789 added, drop)
1786 1790 files = changed + removed
1787 1791 else:
1788 1792 mn = p1.manifestnode()
1789 1793 files = []
1790 1794
1791 1795 # update changelog
1792 1796 self.ui.note(_("committing changelog\n"))
1793 1797 self.changelog.delayupdate(tr)
1794 1798 n = self.changelog.add(mn, files, ctx.description(),
1795 1799 trp, p1.node(), p2.node(),
1796 1800 user, ctx.date(), ctx.extra().copy())
1797 1801 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1798 1802 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1799 1803 parent2=xp2)
1800 1804 # set the new commit in its proper phase
1801 1805 targetphase = subrepo.newcommitphase(self.ui, ctx)
1802 1806 if targetphase:
1803 1807 # retracting the boundary does not alter parent changesets.
1804 1808 # if a parent has a higher phase, the resulting phase will
1805 1809 # be compliant anyway
1806 1810 #
1807 1811 # if minimal phase was 0 we don't need to retract anything
1808 1812 phases.retractboundary(self, tr, targetphase, [n])
1809 1813 tr.close()
1810 1814 branchmap.updatecache(self.filtered('served'))
1811 1815 return n
1812 1816 finally:
1813 1817 if tr:
1814 1818 tr.release()
1815 1819 lock.release()
1816 1820
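The `pretxncommit` hook fired above (with `throw=True`) can veto the whole transaction. A sketch of an in-process Python hook; the module and hook names are hypothetical, and it would be enabled with `pretxncommit.nomerges = python:myhooks.nomerges` in the `[hooks]` section of an hgrc:

```python
# myhooks.py (hypothetical). A truthy return value makes the hook fail,
# which rolls back the still-open commit transaction.
def nomerges(ui, repo, node=None, parent2=None, **kwargs):
    if parent2:  # a non-empty second parent means a merge commit
        ui.warn('merge commits are not allowed in this repository\n')
        return True
    return False
```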
1817 1821 @unfilteredmethod
1818 1822 def destroying(self):
1819 1823 '''Inform the repository that nodes are about to be destroyed.
1820 1824 Intended for use by strip and rollback, so there's a common
1821 1825 place for anything that has to be done before destroying history.
1822 1826
1823 1827 This is mostly useful for saving state that is in memory and waiting
1824 1828 to be flushed when the current lock is released. Because a call to
1825 1829 destroyed is imminent, the repo will be invalidated causing those
1826 1830 changes to stay in memory (waiting for the next unlock), or vanish
1827 1831 completely.
1828 1832 '''
1829 1833 # When using the same lock to commit and strip, the phasecache is left
1830 1834 # dirty after committing. Then when we strip, the repo is invalidated,
1831 1835 # causing those changes to disappear.
1832 1836 if '_phasecache' in vars(self):
1833 1837 self._phasecache.write()
1834 1838
1835 1839 @unfilteredmethod
1836 1840 def destroyed(self):
1837 1841 '''Inform the repository that nodes have been destroyed.
1838 1842 Intended for use by strip and rollback, so there's a common
1839 1843 place for anything that has to be done after destroying history.
1840 1844 '''
1841 1845 # When one tries to:
1842 1846 # 1) destroy nodes thus calling this method (e.g. strip)
1843 1847 # 2) use phasecache somewhere (e.g. commit)
1844 1848 #
1845 1849 # then 2) will fail because the phasecache contains nodes that were
1846 1850 # removed. We can either remove phasecache from the filecache,
1847 1851 # causing it to reload next time it is accessed, or simply filter
1848 1852 # the removed nodes now and write the updated cache.
1849 1853 self._phasecache.filterunknown(self)
1850 1854 self._phasecache.write()
1851 1855
1852 1856 # update the 'served' branch cache to help read only server process
1853 1857 # Thanks to branchcache collaboration this is done from the nearest
1854 1858 # filtered subset and it is expected to be fast.
1855 1859 branchmap.updatecache(self.filtered('served'))
1856 1860
1857 1861 # Ensure the persistent tag cache is updated. Doing it now
1858 1862 # means that the tag cache only has to worry about destroyed
1859 1863 # heads immediately after a strip/rollback. That in turn
1860 1864 # guarantees that "cachetip == currenttip" (comparing both rev
1861 1865 # and node) always means no nodes have been added or destroyed.
1862 1866
1863 1867 # XXX this is suboptimal when qrefresh'ing: we strip the current
1864 1868 # head, refresh the tag cache, then immediately add a new head.
1865 1869 # But I think doing it this way is necessary for the "instant
1866 1870 # tag cache retrieval" case to work.
1867 1871 self.invalidate()
1868 1872
1869 1873 def walk(self, match, node=None):
1870 1874 '''
1871 1875 walk recursively through the directory tree or a given
1872 1876 changeset, finding all files matched by the match
1873 1877 function
1874 1878 '''
1875 1879 return self[node].walk(match)
1876 1880
1877 1881 def status(self, node1='.', node2=None, match=None,
1878 1882 ignored=False, clean=False, unknown=False,
1879 1883 listsubrepos=False):
1880 1884 '''a convenience method that calls node1.status(node2)'''
1881 1885 return self[node1].status(node2, match, ignored, clean, unknown,
1882 1886 listsubrepos)
1883 1887
1884 1888 def heads(self, start=None):
1885 1889 if start is None:
1886 1890 cl = self.changelog
1887 1891 headrevs = reversed(cl.headrevs())
1888 1892 return [cl.node(rev) for rev in headrevs]
1889 1893
1890 1894 heads = self.changelog.heads(start)
1891 1895 # sort the output in rev descending order
1892 1896 return sorted(heads, key=self.changelog.rev, reverse=True)
1893 1897
1894 1898 def branchheads(self, branch=None, start=None, closed=False):
1895 1899 '''return a (possibly filtered) list of heads for the given branch
1896 1900
1897 1901 Heads are returned in topological order, from newest to oldest.
1898 1902 If branch is None, use the dirstate branch.
1899 1903 If start is not None, return only heads reachable from start.
1900 1904 If closed is True, return heads that are marked as closed as well.
1901 1905 '''
1902 1906 if branch is None:
1903 1907 branch = self[None].branch()
1904 1908 branches = self.branchmap()
1905 1909 if branch not in branches:
1906 1910 return []
1907 1911 # the cache returns heads ordered lowest to highest
1908 1912 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1909 1913 if start is not None:
1910 1914 # filter out the heads that cannot be reached from startrev
1911 1915 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1912 1916 bheads = [h for h in bheads if h in fbheads]
1913 1917 return bheads
1914 1918
1915 1919 def branches(self, nodes):
1916 1920 if not nodes:
1917 1921 nodes = [self.changelog.tip()]
1918 1922 b = []
1919 1923 for n in nodes:
1920 1924 t = n
1921 1925 while True:
1922 1926 p = self.changelog.parents(n)
1923 1927 if p[1] != nullid or p[0] == nullid:
1924 1928 b.append((t, n, p[0], p[1]))
1925 1929 break
1926 1930 n = p[0]
1927 1931 return b
1928 1932
1929 1933 def between(self, pairs):
1930 1934 r = []
1931 1935
1932 1936 for top, bottom in pairs:
1933 1937 n, l, i = top, [], 0
1934 1938 f = 1
1935 1939
1936 1940 while n != bottom and n != nullid:
1937 1941 p = self.changelog.parents(n)[0]
1938 1942 if i == f:
1939 1943 l.append(n)
1940 1944 f = f * 2
1941 1945 n = p
1942 1946 i += 1
1943 1947
1944 1948 r.append(l)
1945 1949
1946 1950 return r
1947 1951
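`between()` implements the node sampling used by the legacy discovery protocol: walking first parents from `top` toward `bottom`, it records the nodes at exponentially growing distances. A small standalone sketch of the same index pattern:

```python
# Editor's sketch: which first-parent distances between() keeps. It
# appends when i == f and then doubles f, i.e. distances 1, 2, 4, 8, ...
def sampled_distances(chain_length):
    picks, f, i = [], 1, 0
    while i < chain_length:
        if i == f:
            picks.append(i)
            f *= 2
        i += 1
    return picks

print(sampled_distances(20))  # -> [1, 2, 4, 8, 16]
```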
1948 1952 def checkpush(self, pushop):
1949 1953 """Extensions can override this function if additional checks have
1950 1954 to be performed before pushing, or call it if they override the
1951 1955 push command.
1952 1956 """
1953 1957 pass
1954 1958
1955 1959 @unfilteredpropertycache
1956 1960 def prepushoutgoinghooks(self):
1957 1961 """Return util.hooks consists of a pushop with repo, remote, outgoing
1958 1962 methods, which are called before pushing changesets.
1959 1963 """
1960 1964 return util.hooks()
1961 1965
1962 1966 def pushkey(self, namespace, key, old, new):
1963 1967 try:
1964 1968 tr = self.currenttransaction()
1965 1969 hookargs = {}
1966 1970 if tr is not None:
1967 1971 hookargs.update(tr.hookargs)
1968 1972 hookargs['namespace'] = namespace
1969 1973 hookargs['key'] = key
1970 1974 hookargs['old'] = old
1971 1975 hookargs['new'] = new
1972 1976 self.hook('prepushkey', throw=True, **hookargs)
1973 1977 except error.HookAbort as exc:
1974 1978 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1975 1979 if exc.hint:
1976 1980 self.ui.write_err(_("(%s)\n") % exc.hint)
1977 1981 return False
1978 1982 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1979 1983 ret = pushkey.push(self, namespace, key, old, new)
1980 1984 def runhook():
1981 1985 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1982 1986 ret=ret)
1983 1987 self._afterlock(runhook)
1984 1988 return ret
1985 1989
1986 1990 def listkeys(self, namespace):
1987 1991 self.hook('prelistkeys', throw=True, namespace=namespace)
1988 1992 self.ui.debug('listing keys for "%s"\n' % namespace)
1989 1993 values = pushkey.list(self, namespace)
1990 1994 self.hook('listkeys', namespace=namespace, values=values)
1991 1995 return values
1992 1996
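The `pushkey()`/`listkeys()` pair above backs Mercurial's key/value namespaces such as `'bookmarks'` and `'phases'`. A hedged usage sketch (the path is invented):

```python
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')  # hypothetical path
marks = repo.listkeys('bookmarks')  # dict: bookmark name -> hex node
for name in sorted(marks):
    print('%s -> %s' % (name, marks[name]))
```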
1993 1997 def debugwireargs(self, one, two, three=None, four=None, five=None):
1994 1998 '''used to test argument passing over the wire'''
1995 1999 return "%s %s %s %s %s" % (one, two, three, four, five)
1996 2000
1997 2001 def savecommitmessage(self, text):
1998 2002 fp = self.vfs('last-message.txt', 'wb')
1999 2003 try:
2000 2004 fp.write(text)
2001 2005 finally:
2002 2006 fp.close()
2003 2007 return self.pathto(fp.name[len(self.root) + 1:])
2004 2008
2005 2009 # used to avoid circular references so destructors work
2006 2010 def aftertrans(files):
2007 2011 renamefiles = [tuple(t) for t in files]
2008 2012 def a():
2009 2013 for vfs, src, dest in renamefiles:
2010 2014 try:
2011 2015 vfs.rename(src, dest)
2012 2016 except OSError: # journal file does not yet exist
2013 2017 pass
2014 2018 return a
2015 2019
2016 2020 def undoname(fn):
2017 2021 base, name = os.path.split(fn)
2018 2022 assert name.startswith('journal')
2019 2023 return os.path.join(base, name.replace('journal', 'undo', 1))
2020 2024
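Together, `aftertrans()` and `undoname()` implement the journal-to-undo rename that makes `hg rollback` possible: once a transaction succeeds, each `journal*` file is renamed to its `undo*` counterpart. A usage sketch of `undoname` (POSIX path separators assumed):

```python
from mercurial.localrepo import undoname

print(undoname('.hg/store/journal'))             # .hg/store/undo
print(undoname('.hg/store/journal.phaseroots'))  # .hg/store/undo.phaseroots
```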
2021 2025 def instance(ui, path, create):
2022 2026 return localrepository(ui, util.urllocalpath(path), create)
2023 2027
2024 2028 def islocal(path):
2025 2029 return True
2026 2030
2027 2031 def newreporequirements(repo):
2028 2032 """Determine the set of requirements for a new local repository.
2029 2033
2030 2034 Extensions can wrap this function to specify custom requirements for
2031 2035 new repositories.
2032 2036 """
2033 2037 ui = repo.ui
2034 2038 requirements = set(['revlogv1'])
2035 2039 if ui.configbool('format', 'usestore', True):
2036 2040 requirements.add('store')
2037 2041 if ui.configbool('format', 'usefncache', True):
2038 2042 requirements.add('fncache')
2039 2043 if ui.configbool('format', 'dotencode', True):
2040 2044 requirements.add('dotencode')
2041 2045
2042 2046 compengine = ui.config('experimental', 'format.compression', 'zlib')
2043 2047 if compengine not in util.compengines:
2044 2048 raise error.Abort(_('compression engine %s defined by '
2045 2049 'experimental.format.compression not available') %
2046 2050 compengine,
2047 2051 hint=_('run "hg debuginstall" to list available '
2048 2052 'compression engines'))
2049 2053
2050 2054 # zlib is the historical default and doesn't need an explicit requirement.
2051 2055 if compengine != 'zlib':
2052 2056 requirements.add('exp-compression-%s' % compengine)
2053 2057
2054 2058 if scmutil.gdinitconfig(ui):
2055 2059 requirements.add('generaldelta')
2056 2060 if ui.configbool('experimental', 'treemanifest', False):
2057 2061 requirements.add('treemanifest')
2058 2062 if ui.configbool('experimental', 'manifestv2', False):
2059 2063 requirements.add('manifestv2')
2060 2064
2061 2065 return requirements
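A hedged sketch of how the `format.*` knobs above turn into on-disk requirements; the path is a throwaway, and the exact set depends on your configuration defaults:

```python
from mercurial import hg, ui as uimod

u = uimod.ui()  # ui.load() on newer Mercurial
# zlib is the historical default, so no exp-compression-* entry appears:
u.setconfig('experimental', 'format.compression', 'zlib')
repo = hg.repository(u, '/tmp/demo-repo', create=True)
print(sorted(repo.requirements))
# typically: ['dotencode', 'fncache', 'generaldelta', 'revlogv1', 'store']
```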
@@ -1,190 +1,189 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from __future__ import absolute_import
11 11
12 12 import errno
13 13 import os
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 byterange,
18 18 changelog,
19 19 error,
20 20 localrepo,
21 21 manifest,
22 22 namespaces,
23 23 scmutil,
24 24 store,
25 25 url,
26 26 util,
27 27 )
28 28
29 29 urlerr = util.urlerr
30 30 urlreq = util.urlreq
31 31
32 32 class httprangereader(object):
33 33 def __init__(self, url, opener):
34 34 # we assume opener has HTTPRangeHandler
35 35 self.url = url
36 36 self.pos = 0
37 37 self.opener = opener
38 38 self.name = url
39 39
40 40 def __enter__(self):
41 41 return self
42 42
43 43 def __exit__(self, exc_type, exc_value, traceback):
44 44 self.close()
45 45
46 46 def seek(self, pos):
47 47 self.pos = pos
48 48 def read(self, bytes=None):
49 49 req = urlreq.request(self.url)
50 50 end = ''
51 51 if bytes:
52 52 end = self.pos + bytes - 1
53 53 if self.pos or end:
54 54 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
55 55
56 56 try:
57 57 f = self.opener.open(req)
58 58 data = f.read()
59 59 code = f.code
60 60 except urlerr.httperror as inst:
61 61 num = inst.code == 404 and errno.ENOENT or None
62 62 raise IOError(num, inst)
63 63 except urlerr.urlerror as inst:
64 64 raise IOError(None, inst.reason[1])
65 65
66 66 if code == 200:
67 67 # HTTPRangeHandler does nothing if remote does not support
68 68 # Range headers and returns the full entity. Let's slice it.
69 69 if bytes:
70 70 data = data[self.pos:self.pos + bytes]
71 71 else:
72 72 data = data[self.pos:]
73 73 elif bytes:
74 74 data = data[:bytes]
75 75 self.pos += len(data)
76 76 return data
77 77 def readlines(self):
78 78 return self.read().splitlines(True)
79 79 def __iter__(self):
80 80 return iter(self.readlines())
81 81 def close(self):
82 82 pass
83 83
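`httprangereader` is a seekable, read-only, file-like object built on HTTP Range requests. A hedged usage sketch (the URL is invented; the opener setup mirrors `build_opener` below):

```python
from mercurial import byterange, ui as uimod, url as urlmod

opener = urlmod.opener(uimod.ui(), None)  # no auth info
opener.add_handler(byterange.HTTPRangeHandler())
f = httprangereader('http://example.com/repo/.hg/store/00changelog.i',
                    opener)
f.seek(16)
data = f.read(64)  # sends 'Range: bytes=16-79', slicing if ignored
print(len(data))
```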
84 84 def build_opener(ui, authinfo):
85 85 # urllib cannot handle URLs with embedded user or passwd
86 86 urlopener = url.opener(ui, authinfo)
87 87 urlopener.add_handler(byterange.HTTPRangeHandler())
88 88
89 89 class statichttpvfs(scmutil.abstractvfs):
90 90 def __init__(self, base):
91 91 self.base = base
92 92
93 93 def __call__(self, path, mode='r', *args, **kw):
94 94 if mode not in ('r', 'rb'):
95 95 raise IOError('Permission denied')
96 96 f = "/".join((self.base, urlreq.quote(path)))
97 97 return httprangereader(f, urlopener)
98 98
99 99 def join(self, path):
100 100 if path:
101 101 return os.path.join(self.base, path)
102 102 else:
103 103 return self.base
104 104
105 105 return statichttpvfs
106 106
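Note that `build_opener()` returns a vfs *class* closed over the URL opener; instantiating it yields read-only file access backed by `httprangereader`. A sketch (base URL invented):

```python
from mercurial import ui as uimod

vfsclass = build_opener(uimod.ui(), None)      # build_opener: above
vfs = vfsclass('http://example.com/repo/.hg')  # hypothetical base URL
fp = vfs('00changelog.i')  # -> httprangereader for that file
# Any write mode is refused by __call__:
#   vfs('00changelog.i', mode='w')  raises IOError('Permission denied')
```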
107 107 class statichttppeer(localrepo.localpeer):
108 108 def local(self):
109 109 return None
110 110 def canpush(self):
111 111 return False
112 112
113 113 class statichttprepository(localrepo.localrepository):
114 114 supported = localrepo.localrepository._basesupported
115 115
116 116 def __init__(self, ui, path):
117 117 self._url = path
118 118 self.ui = ui
119 119
120 120 self.root = path
121 121 u = util.url(path.rstrip('/') + "/.hg")
122 122 self.path, authinfo = u.authinfo()
123 123
124 124 vfsclass = build_opener(ui, authinfo)
125 125 self.vfs = vfsclass(self.path)
126 self.opener = self.vfs
127 126 self._phasedefaults = []
128 127
129 128 self.names = namespaces.namespaces()
130 129
131 130 try:
132 131 requirements = scmutil.readrequires(self.vfs, self.supported)
133 132 except IOError as inst:
134 133 if inst.errno != errno.ENOENT:
135 134 raise
136 135 requirements = set()
137 136
138 137 # check if it is a non-empty old-style repository
139 138 try:
140 139 fp = self.vfs("00changelog.i")
141 140 fp.read(1)
142 141 fp.close()
143 142 except IOError as inst:
144 143 if inst.errno != errno.ENOENT:
145 144 raise
146 145 # we do not care about empty old-style repositories here
147 146 msg = _("'%s' does not appear to be an hg repository") % path
148 147 raise error.RepoError(msg)
149 148
150 149 # setup store
151 150 self.store = store.store(requirements, self.path, vfsclass)
152 151 self.spath = self.store.path
153 152 self.svfs = self.store.opener
154 153 self.sjoin = self.store.join
155 154 self._filecache = {}
156 155 self.requirements = requirements
157 156
158 157 self.manifestlog = manifest.manifestlog(self.svfs, self)
159 158 self.changelog = changelog.changelog(self.svfs)
160 159 self._tags = None
161 160 self.nodetagscache = None
162 161 self._branchcaches = {}
163 162 self._revbranchcache = None
164 163 self.encodepats = None
165 164 self.decodepats = None
166 165 self._transref = None
167 166
168 167 def _restrictcapabilities(self, caps):
169 168 caps = super(statichttprepository, self)._restrictcapabilities(caps)
170 169 return caps.difference(["pushkey"])
171 170
172 171 def url(self):
173 172 return self._url
174 173
175 174 def local(self):
176 175 return False
177 176
178 177 def peer(self):
179 178 return statichttppeer(self)
180 179
181 180 def lock(self, wait=True):
182 181 raise error.Abort(_('cannot lock static-http repository'))
183 182
184 183 def _writecaches(self):
185 184 pass # statichttprepository are read only
186 185
187 186 def instance(ui, path, create):
188 187 if create:
189 188 raise error.Abort(_('cannot create new static-http repository'))
190 189 return statichttprepository(ui, path[7:])
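End to end, `instance()` strips the 7-character `static-` scheme prefix, so the repository class receives a plain `http://` URL; such repositories are read-only, which is why `lock()` aborts and `statichttppeer.canpush()` returns False. A usage sketch (the URL is made up):

```python
# Command-line usage:
#
#   hg clone static-http://example.com/some/repo local-copy
#
# and the prefix stripping performed by instance():
print('static-http://example.com/some/repo'[7:])
# -> http://example.com/some/repo
```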