localrepo: use dirstate savebackup instead of handling dirstate file manually...
Mateusz Kwapich - r29191:ad1ce3c7 default
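Note: the core of this change is in transaction() and _writejournal(). Previously the repo flushed the dirstate and copied the raw .hg/dirstate file into journal.dirstate by hand; now it asks the dirstate object to save its own backup, which pairs with the restorebackup() call already used on transaction failure. A minimal before/after sketch of the calls involved (simplified from the hunks below, not a complete excerpt):

    # before: flush pending changes, then copy the raw file by hand
    self.dirstate.write(None)                      # in transaction()
    self.vfs.write("journal.dirstate",
                   self.vfs.tryread("dirstate"))   # in _writejournal()

    # after: the dirstate object manages its own backup file
    self.dirstate.savebackup(None, prefix='journal.')   # in _writejournal()

    # on failure, releasefn restores from that backup (unchanged context below):
    repo.dirstate.restorebackup(None, prefix='journal.')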
@@ -1,1971 +1,1967 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import inspect
12 12 import os
13 13 import random
14 14 import time
15 15 import weakref
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 short,
22 22 wdirrev,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 cmdutil,
31 31 context,
32 32 dirstate,
33 33 encoding,
34 34 error,
35 35 exchange,
36 36 extensions,
37 37 filelog,
38 38 hook,
39 39 lock as lockmod,
40 40 manifest,
41 41 match as matchmod,
42 42 merge as mergemod,
43 43 namespaces,
44 44 obsolete,
45 45 pathutil,
46 46 peer,
47 47 phases,
48 48 pushkey,
49 49 repoview,
50 50 revset,
51 51 scmutil,
52 52 store,
53 53 subrepo,
54 54 tags as tagsmod,
55 55 transaction,
56 56 util,
57 57 )
58 58
59 59 release = lockmod.release
60 60 urlerr = util.urlerr
61 61 urlreq = util.urlreq
62 62
63 63 class repofilecache(scmutil.filecache):
64 64 """All filecache usage on repo are done for logic that should be unfiltered
65 65 """
66 66
67 67 def __get__(self, repo, type=None):
68 68 return super(repofilecache, self).__get__(repo.unfiltered(), type)
69 69 def __set__(self, repo, value):
70 70 return super(repofilecache, self).__set__(repo.unfiltered(), value)
71 71 def __delete__(self, repo):
72 72 return super(repofilecache, self).__delete__(repo.unfiltered())
73 73
74 74 class storecache(repofilecache):
75 75 """filecache for files in the store"""
76 76 def join(self, obj, fname):
77 77 return obj.sjoin(fname)
78 78
79 79 class unfilteredpropertycache(util.propertycache):
80 80 """propertycache that apply to unfiltered repo only"""
81 81
82 82 def __get__(self, repo, type=None):
83 83 unfi = repo.unfiltered()
84 84 if unfi is repo:
85 85 return super(unfilteredpropertycache, self).__get__(unfi)
86 86 return getattr(unfi, self.name)
87 87
88 88 class filteredpropertycache(util.propertycache):
89 89 """propertycache that must take filtering in account"""
90 90
91 91 def cachevalue(self, obj, value):
92 92 object.__setattr__(obj, self.name, value)
93 93
94 94
95 95 def hasunfilteredcache(repo, name):
96 96 """check if a repo has an unfilteredpropertycache value for <name>"""
97 97 return name in vars(repo.unfiltered())
98 98
99 99 def unfilteredmethod(orig):
100 100 """decorate method that always need to be run on unfiltered version"""
101 101 def wrapper(repo, *args, **kwargs):
102 102 return orig(repo.unfiltered(), *args, **kwargs)
103 103 return wrapper
104 104
105 105 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
106 106 'unbundle'))
107 107 legacycaps = moderncaps.union(set(['changegroupsubset']))
108 108
109 109 class localpeer(peer.peerrepository):
110 110 '''peer for a local repo; reflects only the most recent API'''
111 111
112 112 def __init__(self, repo, caps=moderncaps):
113 113 peer.peerrepository.__init__(self)
114 114 self._repo = repo.filtered('served')
115 115 self.ui = repo.ui
116 116 self._caps = repo._restrictcapabilities(caps)
117 117 self.requirements = repo.requirements
118 118 self.supportedformats = repo.supportedformats
119 119
120 120 def close(self):
121 121 self._repo.close()
122 122
123 123 def _capabilities(self):
124 124 return self._caps
125 125
126 126 def local(self):
127 127 return self._repo
128 128
129 129 def canpush(self):
130 130 return True
131 131
132 132 def url(self):
133 133 return self._repo.url()
134 134
135 135 def lookup(self, key):
136 136 return self._repo.lookup(key)
137 137
138 138 def branchmap(self):
139 139 return self._repo.branchmap()
140 140
141 141 def heads(self):
142 142 return self._repo.heads()
143 143
144 144 def known(self, nodes):
145 145 return self._repo.known(nodes)
146 146
147 147 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
148 148 **kwargs):
149 149 cg = exchange.getbundle(self._repo, source, heads=heads,
150 150 common=common, bundlecaps=bundlecaps, **kwargs)
151 151 if bundlecaps is not None and 'HG20' in bundlecaps:
152 152 # When requesting a bundle2, getbundle returns a stream to make the
153 153 # wire-level function happier. We need to build a proper object
154 154 # from it in the local peer.
155 155 cg = bundle2.getunbundler(self.ui, cg)
156 156 return cg
157 157
158 158 # TODO We might want to move the next two calls into legacypeer and add
159 159 # unbundle instead.
160 160
161 161 def unbundle(self, cg, heads, url):
162 162 """apply a bundle on a repo
163 163
164 164 This function handles the repo locking itself."""
165 165 try:
166 166 try:
167 167 cg = exchange.readbundle(self.ui, cg, None)
168 168 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
169 169 if util.safehasattr(ret, 'getchunks'):
170 170 # This is a bundle20 object, turn it into an unbundler.
171 171 # This little dance should be dropped eventually when the
172 172 # API is finally improved.
173 173 stream = util.chunkbuffer(ret.getchunks())
174 174 ret = bundle2.getunbundler(self.ui, stream)
175 175 return ret
176 176 except Exception as exc:
177 177 # If the exception contains output salvaged from a bundle2
178 178 # reply, we need to make sure it is printed before continuing
179 179 # to fail. So we build a bundle2 with such output and consume
180 180 # it directly.
181 181 #
182 182 # This is not very elegant but allows a "simple" solution for
183 183 # issue4594
184 184 output = getattr(exc, '_bundle2salvagedoutput', ())
185 185 if output:
186 186 bundler = bundle2.bundle20(self._repo.ui)
187 187 for out in output:
188 188 bundler.addpart(out)
189 189 stream = util.chunkbuffer(bundler.getchunks())
190 190 b = bundle2.getunbundler(self.ui, stream)
191 191 bundle2.processbundle(self._repo, b)
192 192 raise
193 193 except error.PushRaced as exc:
194 194 raise error.ResponseError(_('push failed:'), str(exc))
195 195
196 196 def lock(self):
197 197 return self._repo.lock()
198 198
199 199 def addchangegroup(self, cg, source, url):
200 200 return cg.apply(self._repo, source, url)
201 201
202 202 def pushkey(self, namespace, key, old, new):
203 203 return self._repo.pushkey(namespace, key, old, new)
204 204
205 205 def listkeys(self, namespace):
206 206 return self._repo.listkeys(namespace)
207 207
208 208 def debugwireargs(self, one, two, three=None, four=None, five=None):
209 209 '''used to test argument passing over the wire'''
210 210 return "%s %s %s %s %s" % (one, two, three, four, five)
211 211
212 212 class locallegacypeer(localpeer):
213 213 '''peer extension which implements legacy methods too; used for tests with
214 214 restricted capabilities'''
215 215
216 216 def __init__(self, repo):
217 217 localpeer.__init__(self, repo, caps=legacycaps)
218 218
219 219 def branches(self, nodes):
220 220 return self._repo.branches(nodes)
221 221
222 222 def between(self, pairs):
223 223 return self._repo.between(pairs)
224 224
225 225 def changegroup(self, basenodes, source):
226 226 return changegroup.changegroup(self._repo, basenodes, source)
227 227
228 228 def changegroupsubset(self, bases, heads, source):
229 229 return changegroup.changegroupsubset(self._repo, bases, heads, source)
230 230
231 231 class localrepository(object):
232 232
233 233 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
234 234 'manifestv2'))
235 235 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
236 236 'dotencode'))
237 237 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
238 238 filtername = None
239 239
240 240 # a list of (ui, featureset) functions.
241 241 # only functions defined in modules of enabled extensions are invoked
242 242 featuresetupfuncs = set()
243 243
244 244 def __init__(self, baseui, path=None, create=False):
245 245 self.requirements = set()
246 246 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
247 247 self.wopener = self.wvfs
248 248 self.root = self.wvfs.base
249 249 self.path = self.wvfs.join(".hg")
250 250 self.origroot = path
251 251 self.auditor = pathutil.pathauditor(self.root, self._checknested)
252 252 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
253 253 realfs=False)
254 254 self.vfs = scmutil.vfs(self.path)
255 255 self.opener = self.vfs
256 256 self.baseui = baseui
257 257 self.ui = baseui.copy()
258 258 self.ui.copy = baseui.copy # prevent copying repo configuration
259 259 # A list of callbacks to shape the phases if no data were found.
260 260 # Callbacks are in the form: func(repo, roots) --> processed root.
261 261 # This list is to be filled by extensions during repo setup.
262 262 self._phasedefaults = []
263 263 try:
264 264 self.ui.readconfig(self.join("hgrc"), self.root)
265 265 extensions.loadall(self.ui)
266 266 except IOError:
267 267 pass
268 268
269 269 if self.featuresetupfuncs:
270 270 self.supported = set(self._basesupported) # use private copy
271 271 extmods = set(m.__name__ for n, m
272 272 in extensions.extensions(self.ui))
273 273 for setupfunc in self.featuresetupfuncs:
274 274 if setupfunc.__module__ in extmods:
275 275 setupfunc(self.ui, self.supported)
276 276 else:
277 277 self.supported = self._basesupported
278 278
279 279 if not self.vfs.isdir():
280 280 if create:
281 281 self.requirements = newreporequirements(self)
282 282
283 283 if not self.wvfs.exists():
284 284 self.wvfs.makedirs()
285 285 self.vfs.makedir(notindexed=True)
286 286
287 287 if 'store' in self.requirements:
288 288 self.vfs.mkdir("store")
289 289
290 290 # create an invalid changelog
291 291 self.vfs.append(
292 292 "00changelog.i",
293 293 '\0\0\0\2' # represents revlogv2
294 294 ' dummy changelog to prevent using the old repo layout'
295 295 )
296 296 else:
297 297 raise error.RepoError(_("repository %s not found") % path)
298 298 elif create:
299 299 raise error.RepoError(_("repository %s already exists") % path)
300 300 else:
301 301 try:
302 302 self.requirements = scmutil.readrequires(
303 303 self.vfs, self.supported)
304 304 except IOError as inst:
305 305 if inst.errno != errno.ENOENT:
306 306 raise
307 307
308 308 self.sharedpath = self.path
309 309 try:
310 310 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
311 311 realpath=True)
312 312 s = vfs.base
313 313 if not vfs.exists():
314 314 raise error.RepoError(
315 315 _('.hg/sharedpath points to nonexistent directory %s') % s)
316 316 self.sharedpath = s
317 317 except IOError as inst:
318 318 if inst.errno != errno.ENOENT:
319 319 raise
320 320
321 321 self.store = store.store(
322 322 self.requirements, self.sharedpath, scmutil.vfs)
323 323 self.spath = self.store.path
324 324 self.svfs = self.store.vfs
325 325 self.sjoin = self.store.join
326 326 self.vfs.createmode = self.store.createmode
327 327 self._applyopenerreqs()
328 328 if create:
329 329 self._writerequirements()
330 330
331 331 self._dirstatevalidatewarned = False
332 332
333 333 self._branchcaches = {}
334 334 self._revbranchcache = None
335 335 self.filterpats = {}
336 336 self._datafilters = {}
337 337 self._transref = self._lockref = self._wlockref = None
338 338
339 339 # A cache for various files under .hg/ that tracks file changes
340 340 # (used by the filecache decorator)
341 341 #
342 342 # Maps a property name to its util.filecacheentry
343 343 self._filecache = {}
344 344
345 345 # hold sets of revisions to be filtered
346 346 # should be cleared when something might have changed the filter value:
347 347 # - new changesets,
348 348 # - phase change,
349 349 # - new obsolescence marker,
350 350 # - working directory parent change,
351 351 # - bookmark changes
352 352 self.filteredrevcache = {}
353 353
354 354 # generic mapping between names and nodes
355 355 self.names = namespaces.namespaces()
356 356
357 357 def close(self):
358 358 self._writecaches()
359 359
360 360 def _writecaches(self):
361 361 if self._revbranchcache:
362 362 self._revbranchcache.write()
363 363
364 364 def _restrictcapabilities(self, caps):
365 365 if self.ui.configbool('experimental', 'bundle2-advertise', True):
366 366 caps = set(caps)
367 367 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
368 368 caps.add('bundle2=' + urlreq.quote(capsblob))
369 369 return caps
370 370
371 371 def _applyopenerreqs(self):
372 372 self.svfs.options = dict((r, 1) for r in self.requirements
373 373 if r in self.openerreqs)
374 374 # experimental config: format.chunkcachesize
375 375 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
376 376 if chunkcachesize is not None:
377 377 self.svfs.options['chunkcachesize'] = chunkcachesize
378 378 # experimental config: format.maxchainlen
379 379 maxchainlen = self.ui.configint('format', 'maxchainlen')
380 380 if maxchainlen is not None:
381 381 self.svfs.options['maxchainlen'] = maxchainlen
382 382 # experimental config: format.manifestcachesize
383 383 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
384 384 if manifestcachesize is not None:
385 385 self.svfs.options['manifestcachesize'] = manifestcachesize
386 386 # experimental config: format.aggressivemergedeltas
387 387 aggressivemergedeltas = self.ui.configbool('format',
388 388 'aggressivemergedeltas', False)
389 389 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
390 390 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
391 391
392 392 def _writerequirements(self):
393 393 scmutil.writerequires(self.vfs, self.requirements)
394 394
395 395 def _checknested(self, path):
396 396 """Determine if path is a legal nested repository."""
397 397 if not path.startswith(self.root):
398 398 return False
399 399 subpath = path[len(self.root) + 1:]
400 400 normsubpath = util.pconvert(subpath)
401 401
402 402 # XXX: Checking against the current working copy is wrong in
403 403 # the sense that it can reject things like
404 404 #
405 405 # $ hg cat -r 10 sub/x.txt
406 406 #
407 407 # if sub/ is no longer a subrepository in the working copy
408 408 # parent revision.
409 409 #
410 410 # However, it can of course also allow things that would have
411 411 # been rejected before, such as the above cat command if sub/
412 412 # is a subrepository now, but was a normal directory before.
413 413 # The old path auditor would have rejected by mistake since it
414 414 # panics when it sees sub/.hg/.
415 415 #
416 416 # All in all, checking against the working copy seems sensible
417 417 # since we want to prevent access to nested repositories on
418 418 # the filesystem *now*.
419 419 ctx = self[None]
420 420 parts = util.splitpath(subpath)
421 421 while parts:
422 422 prefix = '/'.join(parts)
423 423 if prefix in ctx.substate:
424 424 if prefix == normsubpath:
425 425 return True
426 426 else:
427 427 sub = ctx.sub(prefix)
428 428 return sub.checknested(subpath[len(prefix) + 1:])
429 429 else:
430 430 parts.pop()
431 431 return False
432 432
433 433 def peer(self):
434 434 return localpeer(self) # not cached to avoid reference cycle
435 435
436 436 def unfiltered(self):
437 437 """Return unfiltered version of the repository
438 438
439 439 Intended to be overridden by the filtered repo."""
440 440 return self
441 441
442 442 def filtered(self, name):
443 443 """Return a filtered version of a repository"""
444 444 # build a new class with the mixin and the current class
445 445 # (possibly subclass of the repo)
446 446 class proxycls(repoview.repoview, self.unfiltered().__class__):
447 447 pass
448 448 return proxycls(self, name)
449 449
450 450 @repofilecache('bookmarks', 'bookmarks.current')
451 451 def _bookmarks(self):
452 452 return bookmarks.bmstore(self)
453 453
454 454 @property
455 455 def _activebookmark(self):
456 456 return self._bookmarks.active
457 457
458 458 def bookmarkheads(self, bookmark):
459 459 name = bookmark.split('@', 1)[0]
460 460 heads = []
461 461 for mark, n in self._bookmarks.iteritems():
462 462 if mark.split('@', 1)[0] == name:
463 463 heads.append(n)
464 464 return heads
465 465
466 466 # _phaserevs and _phasesets depend on changelog. What we need is to
467 467 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
468 468 # can't be easily expressed in the filecache mechanism.
469 469 @storecache('phaseroots', '00changelog.i')
470 470 def _phasecache(self):
471 471 return phases.phasecache(self, self._phasedefaults)
472 472
473 473 @storecache('obsstore')
474 474 def obsstore(self):
475 475 # read default format for new obsstore.
476 476 # developer config: format.obsstore-version
477 477 defaultformat = self.ui.configint('format', 'obsstore-version', None)
478 478 # rely on obsstore class default when possible.
479 479 kwargs = {}
480 480 if defaultformat is not None:
481 481 kwargs['defaultformat'] = defaultformat
482 482 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
483 483 store = obsolete.obsstore(self.svfs, readonly=readonly,
484 484 **kwargs)
485 485 if store and readonly:
486 486 self.ui.warn(
487 487 _('obsolete feature not enabled but %i markers found!\n')
488 488 % len(list(store)))
489 489 return store
490 490
491 491 @storecache('00changelog.i')
492 492 def changelog(self):
493 493 c = changelog.changelog(self.svfs)
494 494 if 'HG_PENDING' in os.environ:
495 495 p = os.environ['HG_PENDING']
496 496 if p.startswith(self.root):
497 497 c.readpending('00changelog.i.a')
498 498 return c
499 499
500 500 @storecache('00manifest.i')
501 501 def manifest(self):
502 502 return manifest.manifest(self.svfs)
503 503
504 504 def dirlog(self, dir):
505 505 return self.manifest.dirlog(dir)
506 506
507 507 @repofilecache('dirstate')
508 508 def dirstate(self):
509 509 return dirstate.dirstate(self.vfs, self.ui, self.root,
510 510 self._dirstatevalidate)
511 511
512 512 def _dirstatevalidate(self, node):
513 513 try:
514 514 self.changelog.rev(node)
515 515 return node
516 516 except error.LookupError:
517 517 if not self._dirstatevalidatewarned:
518 518 self._dirstatevalidatewarned = True
519 519 self.ui.warn(_("warning: ignoring unknown"
520 520 " working parent %s!\n") % short(node))
521 521 return nullid
522 522
523 523 def __getitem__(self, changeid):
524 524 if changeid is None or changeid == wdirrev:
525 525 return context.workingctx(self)
526 526 if isinstance(changeid, slice):
527 527 return [context.changectx(self, i)
528 528 for i in xrange(*changeid.indices(len(self)))
529 529 if i not in self.changelog.filteredrevs]
530 530 return context.changectx(self, changeid)
531 531
532 532 def __contains__(self, changeid):
533 533 try:
534 534 self[changeid]
535 535 return True
536 536 except error.RepoLookupError:
537 537 return False
538 538
539 539 def __nonzero__(self):
540 540 return True
541 541
542 542 def __len__(self):
543 543 return len(self.changelog)
544 544
545 545 def __iter__(self):
546 546 return iter(self.changelog)
547 547
548 548 def revs(self, expr, *args):
549 549 '''Find revisions matching a revset.
550 550
551 551 The revset is specified as a string ``expr`` that may contain
552 552 %-formatting to escape certain types. See ``revset.formatspec``.
553 553
554 554 Return a revset.abstractsmartset, which is a list-like interface
555 555 that contains integer revisions.
556 556 '''
557 557 expr = revset.formatspec(expr, *args)
558 558 m = revset.match(None, expr)
559 559 return m(self)
560 560
561 561 def set(self, expr, *args):
562 562 '''Find revisions matching a revset and emit changectx instances.
563 563
564 564 This is a convenience wrapper around ``revs()`` that iterates the
565 565 result and is a generator of changectx instances.
566 566 '''
567 567 for r in self.revs(expr, *args):
568 568 yield self[r]
569 569
570 570 def url(self):
571 571 return 'file:' + self.root
572 572
573 573 def hook(self, name, throw=False, **args):
574 574 """Call a hook, passing this repo instance.
575 575
576 576 This is a convenience method to aid invoking hooks. Extensions likely
577 577 won't call this unless they have registered a custom hook or are
578 578 replacing code that is expected to call a hook.
579 579 """
580 580 return hook.hook(self.ui, self, name, throw, **args)
581 581
582 582 @unfilteredmethod
583 583 def _tag(self, names, node, message, local, user, date, extra=None,
584 584 editor=False):
585 585 if isinstance(names, str):
586 586 names = (names,)
587 587
588 588 branches = self.branchmap()
589 589 for name in names:
590 590 self.hook('pretag', throw=True, node=hex(node), tag=name,
591 591 local=local)
592 592 if name in branches:
593 593 self.ui.warn(_("warning: tag %s conflicts with existing"
594 594 " branch name\n") % name)
595 595
596 596 def writetags(fp, names, munge, prevtags):
597 597 fp.seek(0, 2)
598 598 if prevtags and prevtags[-1] != '\n':
599 599 fp.write('\n')
600 600 for name in names:
601 601 if munge:
602 602 m = munge(name)
603 603 else:
604 604 m = name
605 605
606 606 if (self._tagscache.tagtypes and
607 607 name in self._tagscache.tagtypes):
608 608 old = self.tags().get(name, nullid)
609 609 fp.write('%s %s\n' % (hex(old), m))
610 610 fp.write('%s %s\n' % (hex(node), m))
611 611 fp.close()
612 612
613 613 prevtags = ''
614 614 if local:
615 615 try:
616 616 fp = self.vfs('localtags', 'r+')
617 617 except IOError:
618 618 fp = self.vfs('localtags', 'a')
619 619 else:
620 620 prevtags = fp.read()
621 621
622 622 # local tags are stored in the current charset
623 623 writetags(fp, names, None, prevtags)
624 624 for name in names:
625 625 self.hook('tag', node=hex(node), tag=name, local=local)
626 626 return
627 627
628 628 try:
629 629 fp = self.wfile('.hgtags', 'rb+')
630 630 except IOError as e:
631 631 if e.errno != errno.ENOENT:
632 632 raise
633 633 fp = self.wfile('.hgtags', 'ab')
634 634 else:
635 635 prevtags = fp.read()
636 636
637 637 # committed tags are stored in UTF-8
638 638 writetags(fp, names, encoding.fromlocal, prevtags)
639 639
640 640 fp.close()
641 641
642 642 self.invalidatecaches()
643 643
644 644 if '.hgtags' not in self.dirstate:
645 645 self[None].add(['.hgtags'])
646 646
647 647 m = matchmod.exact(self.root, '', ['.hgtags'])
648 648 tagnode = self.commit(message, user, date, extra=extra, match=m,
649 649 editor=editor)
650 650
651 651 for name in names:
652 652 self.hook('tag', node=hex(node), tag=name, local=local)
653 653
654 654 return tagnode
655 655
656 656 def tag(self, names, node, message, local, user, date, editor=False):
657 657 '''tag a revision with one or more symbolic names.
658 658
659 659 names is a list of strings or, when adding a single tag, names may be a
660 660 string.
661 661
662 662 if local is True, the tags are stored in a per-repository file.
663 663 otherwise, they are stored in the .hgtags file, and a new
664 664 changeset is committed with the change.
665 665
666 666 keyword arguments:
667 667
668 668 local: whether to store tags in non-version-controlled file
669 669 (default False)
670 670
671 671 message: commit message to use if committing
672 672
673 673 user: name of user to use if committing
674 674
675 675 date: date tuple to use if committing'''
676 676
677 677 if not local:
678 678 m = matchmod.exact(self.root, '', ['.hgtags'])
679 679 if any(self.status(match=m, unknown=True, ignored=True)):
680 680 raise error.Abort(_('working copy of .hgtags is changed'),
681 681 hint=_('please commit .hgtags manually'))
682 682
683 683 self.tags() # instantiate the cache
684 684 self._tag(names, node, message, local, user, date, editor=editor)
685 685
686 686 @filteredpropertycache
687 687 def _tagscache(self):
688 688 '''Returns a tagscache object that contains various tag-related
689 689 caches.'''
690 690
691 691 # This simplifies its cache management by having one decorated
692 692 # function (this one) and the rest simply fetch things from it.
693 693 class tagscache(object):
694 694 def __init__(self):
695 695 # These two define the set of tags for this repository. tags
696 696 # maps tag name to node; tagtypes maps tag name to 'global' or
697 697 # 'local'. (Global tags are defined by .hgtags across all
698 698 # heads, and local tags are defined in .hg/localtags.)
699 699 # They constitute the in-memory cache of tags.
700 700 self.tags = self.tagtypes = None
701 701
702 702 self.nodetagscache = self.tagslist = None
703 703
704 704 cache = tagscache()
705 705 cache.tags, cache.tagtypes = self._findtags()
706 706
707 707 return cache
708 708
709 709 def tags(self):
710 710 '''return a mapping of tag to node'''
711 711 t = {}
712 712 if self.changelog.filteredrevs:
713 713 tags, tt = self._findtags()
714 714 else:
715 715 tags = self._tagscache.tags
716 716 for k, v in tags.iteritems():
717 717 try:
718 718 # ignore tags to unknown nodes
719 719 self.changelog.rev(v)
720 720 t[k] = v
721 721 except (error.LookupError, ValueError):
722 722 pass
723 723 return t
724 724
725 725 def _findtags(self):
726 726 '''Do the hard work of finding tags. Return a pair of dicts
727 727 (tags, tagtypes) where tags maps tag name to node, and tagtypes
728 728 maps tag name to a string like \'global\' or \'local\'.
729 729 Subclasses or extensions are free to add their own tags, but
730 730 should be aware that the returned dicts will be retained for the
731 731 duration of the localrepo object.'''
732 732
733 733 # XXX what tagtype should subclasses/extensions use? Currently
734 734 # mq and bookmarks add tags, but do not set the tagtype at all.
735 735 # Should each extension invent its own tag type? Should there
736 736 # be one tagtype for all such "virtual" tags? Or is the status
737 737 # quo fine?
738 738
739 739 alltags = {} # map tag name to (node, hist)
740 740 tagtypes = {}
741 741
742 742 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
743 743 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
744 744
745 745 # Build the return dicts. Have to re-encode tag names because
746 746 # the tags module always uses UTF-8 (in order not to lose info
747 747 # writing to the cache), but the rest of Mercurial wants them in
748 748 # local encoding.
749 749 tags = {}
750 750 for (name, (node, hist)) in alltags.iteritems():
751 751 if node != nullid:
752 752 tags[encoding.tolocal(name)] = node
753 753 tags['tip'] = self.changelog.tip()
754 754 tagtypes = dict([(encoding.tolocal(name), value)
755 755 for (name, value) in tagtypes.iteritems()])
756 756 return (tags, tagtypes)
757 757
758 758 def tagtype(self, tagname):
759 759 '''
760 760 return the type of the given tag. result can be:
761 761
762 762 'local' : a local tag
763 763 'global' : a global tag
764 764 None : tag does not exist
765 765 '''
766 766
767 767 return self._tagscache.tagtypes.get(tagname)
768 768
769 769 def tagslist(self):
770 770 '''return a list of tags ordered by revision'''
771 771 if not self._tagscache.tagslist:
772 772 l = []
773 773 for t, n in self.tags().iteritems():
774 774 l.append((self.changelog.rev(n), t, n))
775 775 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
776 776
777 777 return self._tagscache.tagslist
778 778
779 779 def nodetags(self, node):
780 780 '''return the tags associated with a node'''
781 781 if not self._tagscache.nodetagscache:
782 782 nodetagscache = {}
783 783 for t, n in self._tagscache.tags.iteritems():
784 784 nodetagscache.setdefault(n, []).append(t)
785 785 for tags in nodetagscache.itervalues():
786 786 tags.sort()
787 787 self._tagscache.nodetagscache = nodetagscache
788 788 return self._tagscache.nodetagscache.get(node, [])
789 789
790 790 def nodebookmarks(self, node):
791 791 """return the list of bookmarks pointing to the specified node"""
792 792 marks = []
793 793 for bookmark, n in self._bookmarks.iteritems():
794 794 if n == node:
795 795 marks.append(bookmark)
796 796 return sorted(marks)
797 797
798 798 def branchmap(self):
799 799 '''returns a dictionary {branch: [branchheads]} with branchheads
800 800 ordered by increasing revision number'''
801 801 branchmap.updatecache(self)
802 802 return self._branchcaches[self.filtername]
803 803
804 804 @unfilteredmethod
805 805 def revbranchcache(self):
806 806 if not self._revbranchcache:
807 807 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
808 808 return self._revbranchcache
809 809
810 810 def branchtip(self, branch, ignoremissing=False):
811 811 '''return the tip node for a given branch
812 812
813 813 If ignoremissing is True, then this method will not raise an error.
814 814 This is helpful for callers that only expect None for a missing branch
815 815 (e.g. namespace).
816 816
817 817 '''
818 818 try:
819 819 return self.branchmap().branchtip(branch)
820 820 except KeyError:
821 821 if not ignoremissing:
822 822 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
823 823 else:
824 824 pass
825 825
826 826 def lookup(self, key):
827 827 return self[key].node()
828 828
829 829 def lookupbranch(self, key, remote=None):
830 830 repo = remote or self
831 831 if key in repo.branchmap():
832 832 return key
833 833
834 834 repo = (remote and remote.local()) and remote or self
835 835 return repo[key].branch()
836 836
837 837 def known(self, nodes):
838 838 cl = self.changelog
839 839 nm = cl.nodemap
840 840 filtered = cl.filteredrevs
841 841 result = []
842 842 for n in nodes:
843 843 r = nm.get(n)
844 844 resp = not (r is None or r in filtered)
845 845 result.append(resp)
846 846 return result
847 847
848 848 def local(self):
849 849 return self
850 850
851 851 def publishing(self):
852 852 # it's safe (and desirable) to trust the publish flag unconditionally
853 853 # so that we don't finalize changes shared between users via ssh or nfs
854 854 return self.ui.configbool('phases', 'publish', True, untrusted=True)
855 855
856 856 def cancopy(self):
857 857 # so statichttprepo's override of local() works
858 858 if not self.local():
859 859 return False
860 860 if not self.publishing():
861 861 return True
862 862 # if publishing we can't copy if there is filtered content
863 863 return not self.filtered('visible').changelog.filteredrevs
864 864
865 865 def shared(self):
866 866 '''the type of shared repository (None if not shared)'''
867 867 if self.sharedpath != self.path:
868 868 return 'store'
869 869 return None
870 870
871 871 def join(self, f, *insidef):
872 872 return self.vfs.join(os.path.join(f, *insidef))
873 873
874 874 def wjoin(self, f, *insidef):
875 875 return self.vfs.reljoin(self.root, f, *insidef)
876 876
877 877 def file(self, f):
878 878 if f[0] == '/':
879 879 f = f[1:]
880 880 return filelog.filelog(self.svfs, f)
881 881
882 882 def changectx(self, changeid):
883 883 return self[changeid]
884 884
885 885 def setparents(self, p1, p2=nullid):
886 886 self.dirstate.beginparentchange()
887 887 copies = self.dirstate.setparents(p1, p2)
888 888 pctx = self[p1]
889 889 if copies:
890 890 # Adjust copy records, the dirstate cannot do it, it
891 891 # requires access to parents manifests. Preserve them
892 892 # only for entries added to first parent.
893 893 for f in copies:
894 894 if f not in pctx and copies[f] in pctx:
895 895 self.dirstate.copy(copies[f], f)
896 896 if p2 == nullid:
897 897 for f, s in sorted(self.dirstate.copies().items()):
898 898 if f not in pctx and s not in pctx:
899 899 self.dirstate.copy(None, f)
900 900 self.dirstate.endparentchange()
901 901
902 902 def filectx(self, path, changeid=None, fileid=None):
903 903 """changeid can be a changeset revision, node, or tag.
904 904 fileid can be a file revision or node."""
905 905 return context.filectx(self, path, changeid, fileid)
906 906
907 907 def getcwd(self):
908 908 return self.dirstate.getcwd()
909 909
910 910 def pathto(self, f, cwd=None):
911 911 return self.dirstate.pathto(f, cwd)
912 912
913 913 def wfile(self, f, mode='r'):
914 914 return self.wvfs(f, mode)
915 915
916 916 def _link(self, f):
917 917 return self.wvfs.islink(f)
918 918
919 919 def _loadfilter(self, filter):
920 920 if filter not in self.filterpats:
921 921 l = []
922 922 for pat, cmd in self.ui.configitems(filter):
923 923 if cmd == '!':
924 924 continue
925 925 mf = matchmod.match(self.root, '', [pat])
926 926 fn = None
927 927 params = cmd
928 928 for name, filterfn in self._datafilters.iteritems():
929 929 if cmd.startswith(name):
930 930 fn = filterfn
931 931 params = cmd[len(name):].lstrip()
932 932 break
933 933 if not fn:
934 934 fn = lambda s, c, **kwargs: util.filter(s, c)
935 935 # Wrap old filters not supporting keyword arguments
936 936 if not inspect.getargspec(fn)[2]:
937 937 oldfn = fn
938 938 fn = lambda s, c, **kwargs: oldfn(s, c)
939 939 l.append((mf, fn, params))
940 940 self.filterpats[filter] = l
941 941 return self.filterpats[filter]
942 942
943 943 def _filter(self, filterpats, filename, data):
944 944 for mf, fn, cmd in filterpats:
945 945 if mf(filename):
946 946 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
947 947 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
948 948 break
949 949
950 950 return data
951 951
952 952 @unfilteredpropertycache
953 953 def _encodefilterpats(self):
954 954 return self._loadfilter('encode')
955 955
956 956 @unfilteredpropertycache
957 957 def _decodefilterpats(self):
958 958 return self._loadfilter('decode')
959 959
960 960 def adddatafilter(self, name, filter):
961 961 self._datafilters[name] = filter
962 962
963 963 def wread(self, filename):
964 964 if self._link(filename):
965 965 data = self.wvfs.readlink(filename)
966 966 else:
967 967 data = self.wvfs.read(filename)
968 968 return self._filter(self._encodefilterpats, filename, data)
969 969
970 970 def wwrite(self, filename, data, flags, backgroundclose=False):
971 971 """write ``data`` into ``filename`` in the working directory
972 972
973 973 This returns the length of the written (maybe decoded) data.
974 974 """
975 975 data = self._filter(self._decodefilterpats, filename, data)
976 976 if 'l' in flags:
977 977 self.wvfs.symlink(data, filename)
978 978 else:
979 979 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
980 980 if 'x' in flags:
981 981 self.wvfs.setflags(filename, False, True)
982 982 return len(data)
983 983
984 984 def wwritedata(self, filename, data):
985 985 return self._filter(self._decodefilterpats, filename, data)
986 986
987 987 def currenttransaction(self):
988 988 """return the current transaction or None if non exists"""
989 989 if self._transref:
990 990 tr = self._transref()
991 991 else:
992 992 tr = None
993 993
994 994 if tr and tr.running():
995 995 return tr
996 996 return None
997 997
998 998 def transaction(self, desc, report=None):
999 999 if (self.ui.configbool('devel', 'all-warnings')
1000 1000 or self.ui.configbool('devel', 'check-locks')):
1001 1001 l = self._lockref and self._lockref()
1002 1002 if l is None or not l.held:
1003 1003 raise RuntimeError('programming error: transaction requires '
1004 1004 'locking')
1005 1005 tr = self.currenttransaction()
1006 1006 if tr is not None:
1007 1007 return tr.nest()
1008 1008
1009 1009 # abort here if the journal already exists
1010 1010 if self.svfs.exists("journal"):
1011 1011 raise error.RepoError(
1012 1012 _("abandoned transaction found"),
1013 1013 hint=_("run 'hg recover' to clean up transaction"))
1014 1014
1015 # make journal.dirstate contain in-memory changes at this point
1016 self.dirstate.write(None)
1017
1018 1015 idbase = "%.40f#%f" % (random.random(), time.time())
1019 1016 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
1020 1017 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1021 1018
1022 1019 self._writejournal(desc)
1023 1020 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1024 1021 if report:
1025 1022 rp = report
1026 1023 else:
1027 1024 rp = self.ui.warn
1028 1025 vfsmap = {'plain': self.vfs} # root of .hg/
1029 1026 # we must avoid a cyclic reference between repo and transaction.
1030 1027 reporef = weakref.ref(self)
1031 1028 def validate(tr):
1032 1029 """will run pre-closing hooks"""
1033 1030 reporef().hook('pretxnclose', throw=True,
1034 1031 txnname=desc, **tr.hookargs)
1035 1032 def releasefn(tr, success):
1036 1033 repo = reporef()
1037 1034 if success:
1038 1035 # this should be explicitly invoked here, because
1039 1036 # in-memory changes aren't written out when closing the
1040 1037 # transaction, if tr.addfilegenerator (via
1041 1038 # dirstate.write or so) isn't invoked while the
1042 1039 # transaction is running
1043 1040 repo.dirstate.write(None)
1044 1041 else:
1045 1042 # discard all changes (including ones already written
1046 1043 # out) in this transaction
1047 1044 repo.dirstate.restorebackup(None, prefix='journal.')
1048 1045
1049 1046 repo.invalidate(clearfilecache=True)
1050 1047
1051 1048 tr = transaction.transaction(rp, self.svfs, vfsmap,
1052 1049 "journal",
1053 1050 "undo",
1054 1051 aftertrans(renames),
1055 1052 self.store.createmode,
1056 1053 validator=validate,
1057 1054 releasefn=releasefn)
1058 1055
1059 1056 tr.hookargs['txnid'] = txnid
1060 1057 # note: writing the fncache only during finalize means that the file is
1061 1058 # outdated when running hooks. As fncache is used for streaming clone,
1062 1059 # this is not expected to break anything that happens during the hooks.
1063 1060 tr.addfinalize('flush-fncache', self.store.write)
1064 1061 def txnclosehook(tr2):
1065 1062 """To be run if transaction is successful, will schedule a hook run
1066 1063 """
1067 1064 # Don't reference tr2 in hook() so we don't hold a reference.
1068 1065 # This reduces memory consumption when there are multiple
1069 1066 # transactions per lock. This can likely go away if issue5045
1070 1067 # fixes the function accumulation.
1071 1068 hookargs = tr2.hookargs
1072 1069
1073 1070 def hook():
1074 1071 reporef().hook('txnclose', throw=False, txnname=desc,
1075 1072 **hookargs)
1076 1073 reporef()._afterlock(hook)
1077 1074 tr.addfinalize('txnclose-hook', txnclosehook)
1078 1075 def txnaborthook(tr2):
1079 1076 """To be run if transaction is aborted
1080 1077 """
1081 1078 reporef().hook('txnabort', throw=False, txnname=desc,
1082 1079 **tr2.hookargs)
1083 1080 tr.addabort('txnabort-hook', txnaborthook)
1084 1081 # avoid eager cache invalidation. in-memory data should be identical
1085 1082 # to stored data if transaction has no error.
1086 1083 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1087 1084 self._transref = weakref.ref(tr)
1088 1085 return tr
1089 1086
1090 1087 def _journalfiles(self):
1091 1088 return ((self.svfs, 'journal'),
1092 1089 (self.vfs, 'journal.dirstate'),
1093 1090 (self.vfs, 'journal.branch'),
1094 1091 (self.vfs, 'journal.desc'),
1095 1092 (self.vfs, 'journal.bookmarks'),
1096 1093 (self.svfs, 'journal.phaseroots'))
1097 1094
1098 1095 def undofiles(self):
1099 1096 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1100 1097
1101 1098 def _writejournal(self, desc):
1102 self.vfs.write("journal.dirstate",
1103 self.vfs.tryread("dirstate"))
1099 self.dirstate.savebackup(None, prefix='journal.')
1104 1100 self.vfs.write("journal.branch",
1105 1101 encoding.fromlocal(self.dirstate.branch()))
1106 1102 self.vfs.write("journal.desc",
1107 1103 "%d\n%s\n" % (len(self), desc))
1108 1104 self.vfs.write("journal.bookmarks",
1109 1105 self.vfs.tryread("bookmarks"))
1110 1106 self.svfs.write("journal.phaseroots",
1111 1107 self.svfs.tryread("phaseroots"))
1112 1108
1113 1109 def recover(self):
1114 1110 with self.lock():
1115 1111 if self.svfs.exists("journal"):
1116 1112 self.ui.status(_("rolling back interrupted transaction\n"))
1117 1113 vfsmap = {'': self.svfs,
1118 1114 'plain': self.vfs,}
1119 1115 transaction.rollback(self.svfs, vfsmap, "journal",
1120 1116 self.ui.warn)
1121 1117 self.invalidate()
1122 1118 return True
1123 1119 else:
1124 1120 self.ui.warn(_("no interrupted transaction available\n"))
1125 1121 return False
1126 1122
1127 1123 def rollback(self, dryrun=False, force=False):
1128 1124 wlock = lock = dsguard = None
1129 1125 try:
1130 1126 wlock = self.wlock()
1131 1127 lock = self.lock()
1132 1128 if self.svfs.exists("undo"):
1133 1129 dsguard = cmdutil.dirstateguard(self, 'rollback')
1134 1130
1135 1131 return self._rollback(dryrun, force, dsguard)
1136 1132 else:
1137 1133 self.ui.warn(_("no rollback information available\n"))
1138 1134 return 1
1139 1135 finally:
1140 1136 release(dsguard, lock, wlock)
1141 1137
1142 1138 @unfilteredmethod # Until we get smarter cache management
1143 1139 def _rollback(self, dryrun, force, dsguard):
1144 1140 ui = self.ui
1145 1141 try:
1146 1142 args = self.vfs.read('undo.desc').splitlines()
1147 1143 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1148 1144 if len(args) >= 3:
1149 1145 detail = args[2]
1150 1146 oldtip = oldlen - 1
1151 1147
1152 1148 if detail and ui.verbose:
1153 1149 msg = (_('repository tip rolled back to revision %s'
1154 1150 ' (undo %s: %s)\n')
1155 1151 % (oldtip, desc, detail))
1156 1152 else:
1157 1153 msg = (_('repository tip rolled back to revision %s'
1158 1154 ' (undo %s)\n')
1159 1155 % (oldtip, desc))
1160 1156 except IOError:
1161 1157 msg = _('rolling back unknown transaction\n')
1162 1158 desc = None
1163 1159
1164 1160 if not force and self['.'] != self['tip'] and desc == 'commit':
1165 1161 raise error.Abort(
1166 1162 _('rollback of last commit while not checked out '
1167 1163 'may lose data'), hint=_('use -f to force'))
1168 1164
1169 1165 ui.status(msg)
1170 1166 if dryrun:
1171 1167 return 0
1172 1168
1173 1169 parents = self.dirstate.parents()
1174 1170 self.destroying()
1175 1171 vfsmap = {'plain': self.vfs, '': self.svfs}
1176 1172 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1177 1173 if self.vfs.exists('undo.bookmarks'):
1178 1174 self.vfs.rename('undo.bookmarks', 'bookmarks')
1179 1175 if self.svfs.exists('undo.phaseroots'):
1180 1176 self.svfs.rename('undo.phaseroots', 'phaseroots')
1181 1177 self.invalidate()
1182 1178
1183 1179 parentgone = (parents[0] not in self.changelog.nodemap or
1184 1180 parents[1] not in self.changelog.nodemap)
1185 1181 if parentgone:
1186 1182 # prevent dirstateguard from overwriting already restored one
1187 1183 dsguard.close()
1188 1184
1189 1185 self.dirstate.restorebackup(None, prefix='undo.')
1190 1186 try:
1191 1187 branch = self.vfs.read('undo.branch')
1192 1188 self.dirstate.setbranch(encoding.tolocal(branch))
1193 1189 except IOError:
1194 1190 ui.warn(_('named branch could not be reset: '
1195 1191 'current branch is still \'%s\'\n')
1196 1192 % self.dirstate.branch())
1197 1193
1198 1194 parents = tuple([p.rev() for p in self[None].parents()])
1199 1195 if len(parents) > 1:
1200 1196 ui.status(_('working directory now based on '
1201 1197 'revisions %d and %d\n') % parents)
1202 1198 else:
1203 1199 ui.status(_('working directory now based on '
1204 1200 'revision %d\n') % parents)
1205 1201 mergemod.mergestate.clean(self, self['.'].node())
1206 1202
1207 1203 # TODO: if we know which new heads may result from this rollback, pass
1208 1204 # them to destroy(), which will prevent the branchhead cache from being
1209 1205 # invalidated.
1210 1206 self.destroyed()
1211 1207 return 0
1212 1208
1213 1209 def invalidatecaches(self):
1214 1210
1215 1211 if '_tagscache' in vars(self):
1216 1212 # can't use delattr on proxy
1217 1213 del self.__dict__['_tagscache']
1218 1214
1219 1215 self.unfiltered()._branchcaches.clear()
1220 1216 self.invalidatevolatilesets()
1221 1217
1222 1218 def invalidatevolatilesets(self):
1223 1219 self.filteredrevcache.clear()
1224 1220 obsolete.clearobscaches(self)
1225 1221
1226 1222 def invalidatedirstate(self):
1227 1223 '''Invalidates the dirstate, causing the next call to dirstate
1228 1224 to check if it was modified since the last time it was read,
1229 1225 rereading it if it has.
1230 1226
1231 1227 This is different from dirstate.invalidate() in that it doesn't always
1232 1228 reread the dirstate. Use dirstate.invalidate() if you want to
1233 1229 explicitly read the dirstate again (i.e. restoring it to a previous
1234 1230 known good state).'''
1235 1231 if hasunfilteredcache(self, 'dirstate'):
1236 1232 for k in self.dirstate._filecache:
1237 1233 try:
1238 1234 delattr(self.dirstate, k)
1239 1235 except AttributeError:
1240 1236 pass
1241 1237 delattr(self.unfiltered(), 'dirstate')
1242 1238
1243 1239 def invalidate(self, clearfilecache=False):
1244 1240 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1245 1241 for k in self._filecache.keys():
1246 1242 # dirstate is invalidated separately in invalidatedirstate()
1247 1243 if k == 'dirstate':
1248 1244 continue
1249 1245
1250 1246 if clearfilecache:
1251 1247 del self._filecache[k]
1252 1248 try:
1253 1249 delattr(unfiltered, k)
1254 1250 except AttributeError:
1255 1251 pass
1256 1252 self.invalidatecaches()
1257 1253 self.store.invalidatecaches()
1258 1254
1259 1255 def invalidateall(self):
1260 1256 '''Fully invalidates both store and non-store parts, causing the
1261 1257 subsequent operation to reread any outside changes.'''
1262 1258 # extension should hook this to invalidate its caches
1263 1259 self.invalidate()
1264 1260 self.invalidatedirstate()
1265 1261
1266 1262 def _refreshfilecachestats(self, tr):
1267 1263 """Reload stats of cached files so that they are flagged as valid"""
1268 1264 for k, ce in self._filecache.items():
1269 1265 if k == 'dirstate' or k not in self.__dict__:
1270 1266 continue
1271 1267 ce.refresh()
1272 1268
1273 1269 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1274 1270 inheritchecker=None, parentenvvar=None):
1275 1271 parentlock = None
1276 1272 # the contents of parentenvvar are used by the underlying lock to
1277 1273 # determine whether it can be inherited
1278 1274 if parentenvvar is not None:
1279 1275 parentlock = os.environ.get(parentenvvar)
1280 1276 try:
1281 1277 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1282 1278 acquirefn=acquirefn, desc=desc,
1283 1279 inheritchecker=inheritchecker,
1284 1280 parentlock=parentlock)
1285 1281 except error.LockHeld as inst:
1286 1282 if not wait:
1287 1283 raise
1288 1284 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1289 1285 (desc, inst.locker))
1290 1286 # default to 600 seconds timeout
1291 1287 l = lockmod.lock(vfs, lockname,
1292 1288 int(self.ui.config("ui", "timeout", "600")),
1293 1289 releasefn=releasefn, acquirefn=acquirefn,
1294 1290 desc=desc)
1295 1291 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1296 1292 return l
1297 1293
1298 1294 def _afterlock(self, callback):
1299 1295 """add a callback to be run when the repository is fully unlocked
1300 1296
1301 1297 The callback will be executed when the outermost lock is released
1302 1298 (with wlock being higher level than 'lock')."""
1303 1299 for ref in (self._wlockref, self._lockref):
1304 1300 l = ref and ref()
1305 1301 if l and l.held:
1306 1302 l.postrelease.append(callback)
1307 1303 break
1308 1304 else: # no lock has been found.
1309 1305 callback()
1310 1306
1311 1307 def lock(self, wait=True):
1312 1308 '''Lock the repository store (.hg/store) and return a weak reference
1313 1309 to the lock. Use this before modifying the store (e.g. committing or
1314 1310 stripping). If you are opening a transaction, get a lock as well.
1315 1311
1316 1312 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1317 1313 'wlock' first to avoid a dead-lock hazard.'''
1318 1314 l = self._lockref and self._lockref()
1319 1315 if l is not None and l.held:
1320 1316 l.lock()
1321 1317 return l
1322 1318
1323 1319 l = self._lock(self.svfs, "lock", wait, None,
1324 1320 self.invalidate, _('repository %s') % self.origroot)
1325 1321 self._lockref = weakref.ref(l)
1326 1322 return l
1327 1323
1328 1324 def _wlockchecktransaction(self):
1329 1325 if self.currenttransaction() is not None:
1330 1326 raise error.LockInheritanceContractViolation(
1331 1327 'wlock cannot be inherited in the middle of a transaction')
1332 1328
1333 1329 def wlock(self, wait=True):
1334 1330 '''Lock the non-store parts of the repository (everything under
1335 1331 .hg except .hg/store) and return a weak reference to the lock.
1336 1332
1337 1333 Use this before modifying files in .hg.
1338 1334
1339 1335 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1340 1336 'wlock' first to avoid a dead-lock hazard.'''
1341 1337 l = self._wlockref and self._wlockref()
1342 1338 if l is not None and l.held:
1343 1339 l.lock()
1344 1340 return l
1345 1341
1346 1342 # We do not need to check for non-waiting lock acquisition. Such
1347 1343 # acquisition would not cause a dead-lock as it would just fail.
1348 1344 if wait and (self.ui.configbool('devel', 'all-warnings')
1349 1345 or self.ui.configbool('devel', 'check-locks')):
1350 1346 l = self._lockref and self._lockref()
1351 1347 if l is not None and l.held:
1352 1348 self.ui.develwarn('"wlock" acquired after "lock"')
1353 1349
1354 1350 def unlock():
1355 1351 if self.dirstate.pendingparentchange():
1356 1352 self.dirstate.invalidate()
1357 1353 else:
1358 1354 self.dirstate.write(None)
1359 1355
1360 1356 self._filecache['dirstate'].refresh()
1361 1357
1362 1358 l = self._lock(self.vfs, "wlock", wait, unlock,
1363 1359 self.invalidatedirstate, _('working directory of %s') %
1364 1360 self.origroot,
1365 1361 inheritchecker=self._wlockchecktransaction,
1366 1362 parentenvvar='HG_WLOCK_LOCKER')
1367 1363 self._wlockref = weakref.ref(l)
1368 1364 return l
1369 1365
1370 1366 def _currentlock(self, lockref):
1371 1367 """Returns the lock if it's held, or None if it's not."""
1372 1368 if lockref is None:
1373 1369 return None
1374 1370 l = lockref()
1375 1371 if l is None or not l.held:
1376 1372 return None
1377 1373 return l
1378 1374
1379 1375 def currentwlock(self):
1380 1376 """Returns the wlock if it's held, or None if it's not."""
1381 1377 return self._currentlock(self._wlockref)
1382 1378
1383 1379 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1384 1380 """
1385 1381 commit an individual file as part of a larger transaction
1386 1382 """
1387 1383
1388 1384 fname = fctx.path()
1389 1385 fparent1 = manifest1.get(fname, nullid)
1390 1386 fparent2 = manifest2.get(fname, nullid)
1391 1387 if isinstance(fctx, context.filectx):
1392 1388 node = fctx.filenode()
1393 1389 if node in [fparent1, fparent2]:
1394 1390 self.ui.debug('reusing %s filelog entry\n' % fname)
1395 1391 return node
1396 1392
1397 1393 flog = self.file(fname)
1398 1394 meta = {}
1399 1395 copy = fctx.renamed()
1400 1396 if copy and copy[0] != fname:
1401 1397 # Mark the new revision of this file as a copy of another
1402 1398 # file. This copy data will effectively act as a parent
1403 1399 # of this new revision. If this is a merge, the first
1404 1400 # parent will be the nullid (meaning "look up the copy data")
1405 1401 # and the second one will be the other parent. For example:
1406 1402 #
1407 1403 # 0 --- 1 --- 3 rev1 changes file foo
1408 1404 # \ / rev2 renames foo to bar and changes it
1409 1405 # \- 2 -/ rev3 should have bar with all changes and
1410 1406 # should record that bar descends from
1411 1407 # bar in rev2 and foo in rev1
1412 1408 #
1413 1409 # this allows this merge to succeed:
1414 1410 #
1415 1411 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1416 1412 # \ / merging rev3 and rev4 should use bar@rev2
1417 1413 # \- 2 --- 4 as the merge base
1418 1414 #
1419 1415
1420 1416 cfname = copy[0]
1421 1417 crev = manifest1.get(cfname)
1422 1418 newfparent = fparent2
1423 1419
1424 1420 if manifest2: # branch merge
1425 1421 if fparent2 == nullid or crev is None: # copied on remote side
1426 1422 if cfname in manifest2:
1427 1423 crev = manifest2[cfname]
1428 1424 newfparent = fparent1
1429 1425
1430 1426 # Here, we used to search backwards through history to try to find
1431 1427 # where the file copy came from if the source of a copy was not in
1432 1428 # the parent directory. However, this doesn't actually make sense to
1433 1429 # do (what does a copy from something not in your working copy even
1434 1430 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1435 1431 # the user that copy information was dropped, so if they didn't
1436 1432 # expect this outcome it can be fixed, but this is the correct
1437 1433 # behavior in this circumstance.
1438 1434
1439 1435 if crev:
1440 1436 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1441 1437 meta["copy"] = cfname
1442 1438 meta["copyrev"] = hex(crev)
1443 1439 fparent1, fparent2 = nullid, newfparent
1444 1440 else:
1445 1441 self.ui.warn(_("warning: can't find ancestor for '%s' "
1446 1442 "copied from '%s'!\n") % (fname, cfname))
1447 1443
1448 1444 elif fparent1 == nullid:
1449 1445 fparent1, fparent2 = fparent2, nullid
1450 1446 elif fparent2 != nullid:
1451 1447 # is one parent an ancestor of the other?
1452 1448 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1453 1449 if fparent1 in fparentancestors:
1454 1450 fparent1, fparent2 = fparent2, nullid
1455 1451 elif fparent2 in fparentancestors:
1456 1452 fparent2 = nullid
1457 1453
1458 1454 # is the file changed?
1459 1455 text = fctx.data()
1460 1456 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1461 1457 changelist.append(fname)
1462 1458 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1463 1459 # are just the flags changed during merge?
1464 1460 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1465 1461 changelist.append(fname)
1466 1462
1467 1463 return fparent1
1468 1464
1469 1465 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1470 1466 """check for commit arguments that aren't commitable"""
1471 1467 if match.isexact() or match.prefix():
1472 1468 matched = set(status.modified + status.added + status.removed)
1473 1469
1474 1470 for f in match.files():
1475 1471 f = self.dirstate.normalize(f)
1476 1472 if f == '.' or f in matched or f in wctx.substate:
1477 1473 continue
1478 1474 if f in status.deleted:
1479 1475 fail(f, _('file not found!'))
1480 1476 if f in vdirs: # visited directory
1481 1477 d = f + '/'
1482 1478 for mf in matched:
1483 1479 if mf.startswith(d):
1484 1480 break
1485 1481 else:
1486 1482 fail(f, _("no match under directory!"))
1487 1483 elif f not in self.dirstate:
1488 1484 fail(f, _("file not tracked!"))
1489 1485
1490 1486 @unfilteredmethod
1491 1487 def commit(self, text="", user=None, date=None, match=None, force=False,
1492 1488 editor=False, extra=None):
1493 1489 """Add a new revision to current repository.
1494 1490
1495 1491 Revision information is gathered from the working directory;
1496 1492 match can be used to filter the committed files. If editor is
1497 1493 supplied, it is called to get a commit message.
1498 1494 """
1499 1495 if extra is None:
1500 1496 extra = {}
1501 1497
1502 1498 def fail(f, msg):
1503 1499 raise error.Abort('%s: %s' % (f, msg))
1504 1500
1505 1501 if not match:
1506 1502 match = matchmod.always(self.root, '')
1507 1503
1508 1504 if not force:
1509 1505 vdirs = []
1510 1506 match.explicitdir = vdirs.append
1511 1507 match.bad = fail
1512 1508
1513 1509 wlock = lock = tr = None
1514 1510 try:
1515 1511 wlock = self.wlock()
1516 1512 lock = self.lock() # for recent changelog (see issue4368)
1517 1513
1518 1514 wctx = self[None]
1519 1515 merge = len(wctx.parents()) > 1
1520 1516
1521 1517 if not force and merge and match.ispartial():
1522 1518 raise error.Abort(_('cannot partially commit a merge '
1523 1519 '(do not specify files or patterns)'))
1524 1520
1525 1521 status = self.status(match=match, clean=force)
1526 1522 if force:
1527 1523 status.modified.extend(status.clean) # mq may commit clean files
1528 1524
1529 1525 # check subrepos
1530 1526 subs = []
1531 1527 commitsubs = set()
1532 1528 newstate = wctx.substate.copy()
1533 1529 # only manage subrepos and .hgsubstate if .hgsub is present
1534 1530 if '.hgsub' in wctx:
1535 1531 # we'll decide whether to track this ourselves, thanks
1536 1532 for c in status.modified, status.added, status.removed:
1537 1533 if '.hgsubstate' in c:
1538 1534 c.remove('.hgsubstate')
1539 1535
1540 1536 # compare current state to last committed state
1541 1537 # build new substate based on last committed state
1542 1538 oldstate = wctx.p1().substate
1543 1539 for s in sorted(newstate.keys()):
1544 1540 if not match(s):
1545 1541 # ignore working copy, use old state if present
1546 1542 if s in oldstate:
1547 1543 newstate[s] = oldstate[s]
1548 1544 continue
1549 1545 if not force:
1550 1546 raise error.Abort(
1551 1547 _("commit with new subrepo %s excluded") % s)
1552 1548 dirtyreason = wctx.sub(s).dirtyreason(True)
1553 1549 if dirtyreason:
1554 1550 if not self.ui.configbool('ui', 'commitsubrepos'):
1555 1551 raise error.Abort(dirtyreason,
1556 1552 hint=_("use --subrepos for recursive commit"))
1557 1553 subs.append(s)
1558 1554 commitsubs.add(s)
1559 1555 else:
1560 1556 bs = wctx.sub(s).basestate()
1561 1557 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1562 1558 if oldstate.get(s, (None, None, None))[1] != bs:
1563 1559 subs.append(s)
1564 1560
1565 1561 # check for removed subrepos
1566 1562 for p in wctx.parents():
1567 1563 r = [s for s in p.substate if s not in newstate]
1568 1564 subs += [s for s in r if match(s)]
1569 1565 if subs:
1570 1566 if (not match('.hgsub') and
1571 1567 '.hgsub' in (wctx.modified() + wctx.added())):
1572 1568 raise error.Abort(
1573 1569 _("can't commit subrepos without .hgsub"))
1574 1570 status.modified.insert(0, '.hgsubstate')
1575 1571
1576 1572 elif '.hgsub' in status.removed:
1577 1573 # clean up .hgsubstate when .hgsub is removed
1578 1574 if ('.hgsubstate' in wctx and
1579 1575 '.hgsubstate' not in (status.modified + status.added +
1580 1576 status.removed)):
1581 1577 status.removed.insert(0, '.hgsubstate')
1582 1578
1583 1579 # make sure all explicit patterns are matched
1584 1580 if not force:
1585 1581 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1586 1582
1587 1583 cctx = context.workingcommitctx(self, status,
1588 1584 text, user, date, extra)
1589 1585
1590 1586 # internal config: ui.allowemptycommit
1591 1587 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1592 1588 or extra.get('close') or merge or cctx.files()
1593 1589 or self.ui.configbool('ui', 'allowemptycommit'))
1594 1590 if not allowemptycommit:
1595 1591 return None
1596 1592
1597 1593 if merge and cctx.deleted():
1598 1594 raise error.Abort(_("cannot commit merge with missing files"))
1599 1595
1600 1596 ms = mergemod.mergestate.read(self)
1601 1597
1602 1598 if list(ms.unresolved()):
1603 1599 raise error.Abort(_('unresolved merge conflicts '
1604 1600 '(see "hg help resolve")'))
1605 1601 if ms.mdstate() != 's' or list(ms.driverresolved()):
1606 1602 raise error.Abort(_('driver-resolved merge conflicts'),
1607 1603 hint=_('run "hg resolve --all" to resolve'))
1608 1604
1609 1605 if editor:
1610 1606 cctx._text = editor(self, cctx, subs)
1611 1607 edited = (text != cctx._text)
1612 1608
1613 1609 # Save commit message in case this transaction gets rolled back
1614 1610 # (e.g. by a pretxncommit hook). Leave the content alone on
1615 1611 # the assumption that the user will use the same editor again.
1616 1612 msgfn = self.savecommitmessage(cctx._text)
1617 1613
1618 1614 # commit subs and write new state
1619 1615 if subs:
1620 1616 for s in sorted(commitsubs):
1621 1617 sub = wctx.sub(s)
1622 1618 self.ui.status(_('committing subrepository %s\n') %
1623 1619 subrepo.subrelpath(sub))
1624 1620 sr = sub.commit(cctx._text, user, date)
1625 1621 newstate[s] = (newstate[s][0], sr)
1626 1622 subrepo.writestate(self, newstate)
1627 1623
1628 1624 p1, p2 = self.dirstate.parents()
1629 1625 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1630 1626 try:
1631 1627 self.hook("precommit", throw=True, parent1=hookp1,
1632 1628 parent2=hookp2)
1633 1629 tr = self.transaction('commit')
1634 1630 ret = self.commitctx(cctx, True)
1635 1631 except: # re-raises
1636 1632 if edited:
1637 1633 self.ui.write(
1638 1634 _('note: commit message saved in %s\n') % msgfn)
1639 1635 raise
1640 1636 # update bookmarks, dirstate and mergestate
1641 1637 bookmarks.update(self, [p1, p2], ret)
1642 1638 cctx.markcommitted(ret)
1643 1639 ms.reset()
1644 1640 tr.close()
1645 1641
1646 1642 finally:
1647 1643 lockmod.release(tr, lock, wlock)
1648 1644
1649 1645 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1650 1646             # hack for commands that use a temporary commit (e.g. histedit):
1651 1647             # the temporary commit may already be stripped before the hook runs
1652 1648 if self.changelog.hasnode(ret):
1653 1649 self.hook("commit", node=node, parent1=parent1,
1654 1650 parent2=parent2)
1655 1651 self._afterlock(commithook)
1656 1652 return ret
1657 1653
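# A minimal usage sketch of commit(), assuming a loaded `repo` and the
# 3.8-era API (the repo path, message and user below are illustrative,
# not from this changeset):
#
#     from mercurial import ui as uimod, hg
#
#     repo = hg.repository(uimod.ui(), '/path/to/repo')
#     # stage changes in the working directory first (hg add / edit files)
#     node = repo.commit(text='example message',
#                        user='example <user@example.com>')
#     # `node` is the new changeset node, or None if nothing was committed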
1658 1654 @unfilteredmethod
1659 1655 def commitctx(self, ctx, error=False):
1660 1656 """Add a new revision to current repository.
1661 1657 Revision information is passed via the context argument.
1662 1658 """
1663 1659
1664 1660 tr = None
1665 1661 p1, p2 = ctx.p1(), ctx.p2()
1666 1662 user = ctx.user()
1667 1663
1668 1664 lock = self.lock()
1669 1665 try:
1670 1666 tr = self.transaction("commit")
1671 1667 trp = weakref.proxy(tr)
1672 1668
1673 1669 if ctx.files():
1674 1670 m1 = p1.manifest()
1675 1671 m2 = p2.manifest()
1676 1672 m = m1.copy()
1677 1673
1678 1674 # check in files
1679 1675 added = []
1680 1676 changed = []
1681 1677 removed = list(ctx.removed())
1682 1678 linkrev = len(self)
1683 1679 self.ui.note(_("committing files:\n"))
1684 1680 for f in sorted(ctx.modified() + ctx.added()):
1685 1681 self.ui.note(f + "\n")
1686 1682 try:
1687 1683 fctx = ctx[f]
1688 1684 if fctx is None:
1689 1685 removed.append(f)
1690 1686 else:
1691 1687 added.append(f)
1692 1688 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1693 1689 trp, changed)
1694 1690 m.setflag(f, fctx.flags())
1695 1691 except OSError as inst:
1696 1692 self.ui.warn(_("trouble committing %s!\n") % f)
1697 1693 raise
1698 1694 except IOError as inst:
1699 1695 errcode = getattr(inst, 'errno', errno.ENOENT)
1700 1696 if error or errcode and errcode != errno.ENOENT:
1701 1697 self.ui.warn(_("trouble committing %s!\n") % f)
1702 1698 raise
1703 1699
1704 1700 # update manifest
1705 1701 self.ui.note(_("committing manifest\n"))
1706 1702 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1707 1703 drop = [f for f in removed if f in m]
1708 1704 for f in drop:
1709 1705 del m[f]
1710 1706 mn = self.manifest.add(m, trp, linkrev,
1711 1707 p1.manifestnode(), p2.manifestnode(),
1712 1708 added, drop)
1713 1709 files = changed + removed
1714 1710 else:
1715 1711 mn = p1.manifestnode()
1716 1712 files = []
1717 1713
1718 1714 # update changelog
1719 1715 self.ui.note(_("committing changelog\n"))
1720 1716 self.changelog.delayupdate(tr)
1721 1717 n = self.changelog.add(mn, files, ctx.description(),
1722 1718 trp, p1.node(), p2.node(),
1723 1719 user, ctx.date(), ctx.extra().copy())
1724 1720 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1725 1721 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1726 1722 parent2=xp2)
1727 1723             # set the new commit in its proper phase
1728 1724 targetphase = subrepo.newcommitphase(self.ui, ctx)
1729 1725 if targetphase:
1730 1726                 # retracting the boundary does not alter parent changesets;
1731 1727                 # if a parent already has a higher phase, the resulting
1732 1728                 # phase will be compliant anyway
1733 1729                 #
1734 1730                 # if the minimal phase was 0, we don't need to retract anything
1735 1731 phases.retractboundary(self, tr, targetphase, [n])
1736 1732 tr.close()
1737 1733 branchmap.updatecache(self.filtered('served'))
1738 1734 return n
1739 1735 finally:
1740 1736 if tr:
1741 1737 tr.release()
1742 1738 lock.release()
1743 1739
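# A hedged sketch of driving commitctx() with an in-memory context (3.8-era
# API assumed; `repo`, the path and the contents are illustrative):
#
#     from mercurial import context
#
#     def filectxfn(repo, memctx, path):
#         # supply the new data for each file listed in `files`
#         return context.memfilectx(repo, path, 'new contents\n')
#
#     mctx = context.memctx(repo, (repo['.'].node(), None),
#                           'in-memory commit', ['a.txt'], filectxfn,
#                           user='example <user@example.com>')
#     newnode = repo.commitctx(mctx)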
1744 1740 @unfilteredmethod
1745 1741 def destroying(self):
1746 1742 '''Inform the repository that nodes are about to be destroyed.
1747 1743 Intended for use by strip and rollback, so there's a common
1748 1744 place for anything that has to be done before destroying history.
1749 1745
1750 1746 This is mostly useful for saving state that is in memory and waiting
1751 1747 to be flushed when the current lock is released. Because a call to
1752 1748         destroyed is imminent, the repo will be invalidated, causing those
1753 1749 changes to stay in memory (waiting for the next unlock), or vanish
1754 1750 completely.
1755 1751 '''
1756 1752 # When using the same lock to commit and strip, the phasecache is left
1757 1753 # dirty after committing. Then when we strip, the repo is invalidated,
1758 1754 # causing those changes to disappear.
1759 1755 if '_phasecache' in vars(self):
1760 1756 self._phasecache.write()
1761 1757
1762 1758 @unfilteredmethod
1763 1759 def destroyed(self):
1764 1760 '''Inform the repository that nodes have been destroyed.
1765 1761 Intended for use by strip and rollback, so there's a common
1766 1762 place for anything that has to be done after destroying history.
1767 1763 '''
1768 1764 # When one tries to:
1769 1765 # 1) destroy nodes thus calling this method (e.g. strip)
1770 1766 # 2) use phasecache somewhere (e.g. commit)
1771 1767 #
1772 1768 # then 2) will fail because the phasecache contains nodes that were
1773 1769 # removed. We can either remove phasecache from the filecache,
1774 1770 # causing it to reload next time it is accessed, or simply filter
1775 1771 # the removed nodes now and write the updated cache.
1776 1772 self._phasecache.filterunknown(self)
1777 1773 self._phasecache.write()
1778 1774
1779 1775         # update the 'served' branch cache to help read-only server processes
1780 1776         # Thanks to branchcache collaboration, this is done from the nearest
1781 1777         # filtered subset and is expected to be fast.
1782 1778 branchmap.updatecache(self.filtered('served'))
1783 1779
1784 1780 # Ensure the persistent tag cache is updated. Doing it now
1785 1781 # means that the tag cache only has to worry about destroyed
1786 1782 # heads immediately after a strip/rollback. That in turn
1787 1783 # guarantees that "cachetip == currenttip" (comparing both rev
1788 1784 # and node) always means no nodes have been added or destroyed.
1789 1785
1790 1786 # XXX this is suboptimal when qrefresh'ing: we strip the current
1791 1787 # head, refresh the tag cache, then immediately add a new head.
1792 1788 # But I think doing it this way is necessary for the "instant
1793 1789 # tag cache retrieval" case to work.
1794 1790 self.invalidate()
1795 1791
1796 1792 def walk(self, match, node=None):
1797 1793 '''
1798 1794 walk recursively through the directory tree or a given
1799 1795 changeset, finding all files matched by the match
1800 1796 function
1801 1797 '''
1802 1798 return self[node].walk(match)
1803 1799
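# Usage sketch (illustrative; assumes a loaded `repo`): walk the working
# directory for tracked Python files.
#
#     from mercurial import match as matchmod
#
#     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
#     for f in repo.walk(m):
#         repo.ui.write('%s\n' % f)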
1804 1800 def status(self, node1='.', node2=None, match=None,
1805 1801 ignored=False, clean=False, unknown=False,
1806 1802 listsubrepos=False):
1807 1803 '''a convenience method that calls node1.status(node2)'''
1808 1804 return self[node1].status(node2, match, ignored, clean, unknown,
1809 1805 listsubrepos)
1810 1806
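# Usage sketch (illustrative `repo`): the returned scmutil.status object
# exposes the change lists as attributes.
#
#     st = repo.status()          # working directory vs. its parent
#     for f in st.modified + st.added + st.removed:
#         repo.ui.write('changed: %s\n' % f)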
1811 1807 def heads(self, start=None):
1812 1808 heads = self.changelog.heads(start)
1813 1809 # sort the output in rev descending order
1814 1810 return sorted(heads, key=self.changelog.rev, reverse=True)
1815 1811
1816 1812 def branchheads(self, branch=None, start=None, closed=False):
1817 1813 '''return a (possibly filtered) list of heads for the given branch
1818 1814
1819 1815 Heads are returned in topological order, from newest to oldest.
1820 1816 If branch is None, use the dirstate branch.
1821 1817 If start is not None, return only heads reachable from start.
1822 1818 If closed is True, return heads that are marked as closed as well.
1823 1819 '''
1824 1820 if branch is None:
1825 1821 branch = self[None].branch()
1826 1822 branches = self.branchmap()
1827 1823 if branch not in branches:
1828 1824 return []
1829 1825 # the cache returns heads ordered lowest to highest
1830 1826 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1831 1827 if start is not None:
1832 1828 # filter out the heads that cannot be reached from startrev
1833 1829 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1834 1830 bheads = [h for h in bheads if h in fbheads]
1835 1831 return bheads
1836 1832
1837 1833 def branches(self, nodes):
1838 1834 if not nodes:
1839 1835 nodes = [self.changelog.tip()]
1840 1836 b = []
1841 1837 for n in nodes:
1842 1838 t = n
1843 1839 while True:
1844 1840 p = self.changelog.parents(n)
1845 1841 if p[1] != nullid or p[0] == nullid:
1846 1842 b.append((t, n, p[0], p[1]))
1847 1843 break
1848 1844 n = p[0]
1849 1845 return b
1850 1846
1851 1847 def between(self, pairs):
1852 1848 r = []
1853 1849
1854 1850 for top, bottom in pairs:
1855 1851 n, l, i = top, [], 0
1856 1852 f = 1
1857 1853
1858 1854 while n != bottom and n != nullid:
1859 1855 p = self.changelog.parents(n)[0]
1860 1856 if i == f:
1861 1857 l.append(n)
1862 1858 f = f * 2
1863 1859 n = p
1864 1860 i += 1
1865 1861
1866 1862 r.append(l)
1867 1863
1868 1864 return r
1869 1865
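# Worked example (for exposition): for each (top, bottom) pair, the loop
# above records the nodes 1, 2, 4, 8, ... first-parent steps below `top`,
# i.e. it samples the chain at exponentially growing distances. For a linear
# history c0 <- c1 <- ... <- c10, between([(c10, c0)]) returns
# [[c9, c8, c6, c2]] (distances 1, 2, 4 and 8 from c10).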
1870 1866 def checkpush(self, pushop):
1871 1867 """Extensions can override this function if additional checks have
1872 1868         to be performed before pushing, or call it if they override the
1873 1869         push command.
1874 1870 """
1875 1871 pass
1876 1872
1877 1873 @unfilteredpropertycache
1878 1874 def prepushoutgoinghooks(self):
1879 1875         """Return a util.hooks object; its functions are called with a pushop
1880 1876         (carrying repo, remote and outgoing) before changesets are pushed.
1881 1877 """
1882 1878 return util.hooks()
1883 1879
1884 1880 def pushkey(self, namespace, key, old, new):
1885 1881 try:
1886 1882 tr = self.currenttransaction()
1887 1883 hookargs = {}
1888 1884 if tr is not None:
1889 1885 hookargs.update(tr.hookargs)
1890 1886 hookargs['namespace'] = namespace
1891 1887 hookargs['key'] = key
1892 1888 hookargs['old'] = old
1893 1889 hookargs['new'] = new
1894 1890 self.hook('prepushkey', throw=True, **hookargs)
1895 1891 except error.HookAbort as exc:
1896 1892 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1897 1893 if exc.hint:
1898 1894 self.ui.write_err(_("(%s)\n") % exc.hint)
1899 1895 return False
1900 1896 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1901 1897 ret = pushkey.push(self, namespace, key, old, new)
1902 1898 def runhook():
1903 1899 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1904 1900 ret=ret)
1905 1901 self._afterlock(runhook)
1906 1902 return ret
1907 1903
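# Usage sketch (hypothetical repo and bookmark name; the 'bookmarks'
# namespace stores hex nodes, with '' denoting an absent key):
#
#     from mercurial.node import hex
#
#     new = hex(repo['tip'].node())
#     ok = repo.pushkey('bookmarks', 'feature', '', new)   # create bookmark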
1908 1904 def listkeys(self, namespace):
1909 1905 self.hook('prelistkeys', throw=True, namespace=namespace)
1910 1906 self.ui.debug('listing keys for "%s"\n' % namespace)
1911 1907 values = pushkey.list(self, namespace)
1912 1908 self.hook('listkeys', namespace=namespace, values=values)
1913 1909 return values
1914 1910
1915 1911 def debugwireargs(self, one, two, three=None, four=None, five=None):
1916 1912 '''used to test argument passing over the wire'''
1917 1913 return "%s %s %s %s %s" % (one, two, three, four, five)
1918 1914
1919 1915 def savecommitmessage(self, text):
1920 1916 fp = self.vfs('last-message.txt', 'wb')
1921 1917 try:
1922 1918 fp.write(text)
1923 1919 finally:
1924 1920 fp.close()
1925 1921 return self.pathto(fp.name[len(self.root) + 1:])
1926 1922
1927 1923 # used to avoid circular references so destructors work
1928 1924 def aftertrans(files):
1929 1925 renamefiles = [tuple(t) for t in files]
1930 1926 def a():
1931 1927 for vfs, src, dest in renamefiles:
1932 1928 try:
1933 1929 vfs.rename(src, dest)
1934 1930 except OSError: # journal file does not yet exist
1935 1931 pass
1936 1932 return a
1937 1933
1938 1934 def undoname(fn):
1939 1935 base, name = os.path.split(fn)
1940 1936 assert name.startswith('journal')
1941 1937 return os.path.join(base, name.replace('journal', 'undo', 1))
1942 1938
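# For example (illustrative), undoname() maps a transaction journal name to
# the corresponding undo name:
#
#     undoname('.hg/store/journal')             -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots')  -> '.hg/store/undo.phaseroots'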
1943 1939 def instance(ui, path, create):
1944 1940 return localrepository(ui, util.urllocalpath(path), create)
1945 1941
1946 1942 def islocal(path):
1947 1943 return True
1948 1944
1949 1945 def newreporequirements(repo):
1950 1946 """Determine the set of requirements for a new local repository.
1951 1947
1952 1948 Extensions can wrap this function to specify custom requirements for
1953 1949 new repositories.
1954 1950 """
1955 1951 ui = repo.ui
1956 1952 requirements = set(['revlogv1'])
1957 1953 if ui.configbool('format', 'usestore', True):
1958 1954 requirements.add('store')
1959 1955 if ui.configbool('format', 'usefncache', True):
1960 1956 requirements.add('fncache')
1961 1957 if ui.configbool('format', 'dotencode', True):
1962 1958 requirements.add('dotencode')
1963 1959
1964 1960 if scmutil.gdinitconfig(ui):
1965 1961 requirements.add('generaldelta')
1966 1962 if ui.configbool('experimental', 'treemanifest', False):
1967 1963 requirements.add('treemanifest')
1968 1964 if ui.configbool('experimental', 'manifestv2', False):
1969 1965 requirements.add('manifestv2')
1970 1966
1971 1967 return requirements
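# The knobs consulted above, in hgrc form (the defaults shown match the
# configbool() calls; generaldelta is decided via scmutil.gdinitconfig):
#
#     [format]
#     usestore = True
#     usefncache = True
#     dotencode = True
#
#     [experimental]
#     treemanifest = False
#     manifestv2 = False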
@@ -1,157 +1,158
1 1 #require unix-permissions
2 2
3 3 test that new files created in .hg inherit the permissions from .hg/store
4 4
5 5 $ mkdir dir
6 6
7 7 just in case somebody has a strange $TMPDIR
8 8
9 9 $ chmod g-s dir
10 10 $ cd dir
11 11
12 12 $ cat >printmodes.py <<EOF
13 13 > import os, sys
14 14 >
15 15 > allnames = []
16 16 > isdir = {}
17 17 > for root, dirs, files in os.walk(sys.argv[1]):
18 18 > for d in dirs:
19 19 > name = os.path.join(root, d)
20 20 > isdir[name] = 1
21 21 > allnames.append(name)
22 22 > for f in files:
23 23 > name = os.path.join(root, f)
24 24 > allnames.append(name)
25 25 > allnames.sort()
26 26 > for name in allnames:
27 27 > suffix = name in isdir and '/' or ''
28 28 > print '%05o %s%s' % (os.lstat(name).st_mode & 07777, name, suffix)
29 29 > EOF
30 30
31 31 $ cat >mode.py <<EOF
32 32 > import sys
33 33 > import os
34 34 > print '%05o' % os.lstat(sys.argv[1]).st_mode
35 35 > EOF
36 36
37 37 $ umask 077
38 38
39 39 $ hg init repo
40 40 $ cd repo
41 41
42 42 $ chmod 0770 .hg/store
43 43
44 44 before commit
45 45 store can be written by the group, other files cannot
46 46 store is setgid
47 47
48 48 $ python ../printmodes.py .
49 49 00700 ./.hg/
50 50 00600 ./.hg/00changelog.i
51 51 00600 ./.hg/requires
52 52 00770 ./.hg/store/
53 53
54 54 $ mkdir dir
55 55 $ touch foo dir/bar
56 56 $ hg ci -qAm 'add files'
57 57
58 58 after commit
59 59 working dir files can only be written by the owner
60 60 files created in .hg can be written by the group
61 61 (in particular, store/**, dirstate, branch cache file, undo files)
62 62 new directories are setgid
63 63
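sketch of the mechanism being exercised (a hedged summary, stated roughly):
mercurial derives a creation mode from .hg/store, in effect
"stat.S_IMODE(os.lstat('.hg/store').st_mode)", and reapplies it to files and
directories it later creates under .hg, which is why the group-write and
setgid bits propagate in the listing below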
64 64 $ python ../printmodes.py .
65 65 00700 ./.hg/
66 66 00600 ./.hg/00changelog.i
67 67 00770 ./.hg/cache/
68 68 00660 ./.hg/cache/branch2-served
69 69 00660 ./.hg/cache/rbc-names-v1
70 70 00660 ./.hg/cache/rbc-revs-v1
71 71 00660 ./.hg/dirstate
72 72 00660 ./.hg/last-message.txt
73 73 00600 ./.hg/requires
74 74 00770 ./.hg/store/
75 75 00660 ./.hg/store/00changelog.i
76 76 00660 ./.hg/store/00manifest.i
77 77 00770 ./.hg/store/data/
78 78 00770 ./.hg/store/data/dir/
79 79 00660 ./.hg/store/data/dir/bar.i
80 80 00660 ./.hg/store/data/foo.i
81 81 00660 ./.hg/store/fncache
82 82 00660 ./.hg/store/phaseroots
83 83 00660 ./.hg/store/undo
84 84 00660 ./.hg/store/undo.backupfiles
85 85 00660 ./.hg/store/undo.phaseroots
86 86 00660 ./.hg/undo.backup.dirstate
87 87 00660 ./.hg/undo.bookmarks
88 88 00660 ./.hg/undo.branch
89 89 00660 ./.hg/undo.desc
90 90 00660 ./.hg/undo.dirstate
91 91 00700 ./dir/
92 92 00600 ./dir/bar
93 93 00600 ./foo
94 94
95 95 $ umask 007
96 96 $ hg init ../push
97 97
98 98 before push
99 99 group can write everything
100 100
101 101 $ python ../printmodes.py ../push
102 102 00770 ../push/.hg/
103 103 00660 ../push/.hg/00changelog.i
104 104 00660 ../push/.hg/requires
105 105 00770 ../push/.hg/store/
106 106
107 107 $ umask 077
108 108 $ hg -q push ../push
109 109
110 110 after push
111 111 group can still write everything
112 112
113 113 $ python ../printmodes.py ../push
114 114 00770 ../push/.hg/
115 115 00660 ../push/.hg/00changelog.i
116 116 00770 ../push/.hg/cache/
117 117 00660 ../push/.hg/cache/branch2-base
118 118 00660 ../push/.hg/cache/rbc-names-v1
119 119 00660 ../push/.hg/cache/rbc-revs-v1
120 00660 ../push/.hg/dirstate
120 121 00660 ../push/.hg/requires
121 122 00770 ../push/.hg/store/
122 123 00660 ../push/.hg/store/00changelog.i
123 124 00660 ../push/.hg/store/00manifest.i
124 125 00770 ../push/.hg/store/data/
125 126 00770 ../push/.hg/store/data/dir/
126 127 00660 ../push/.hg/store/data/dir/bar.i
127 128 00660 ../push/.hg/store/data/foo.i
128 129 00660 ../push/.hg/store/fncache
129 130 00660 ../push/.hg/store/undo
130 131 00660 ../push/.hg/store/undo.backupfiles
131 132 00660 ../push/.hg/store/undo.phaseroots
132 133 00660 ../push/.hg/undo.bookmarks
133 134 00660 ../push/.hg/undo.branch
134 135 00660 ../push/.hg/undo.desc
135 136 00660 ../push/.hg/undo.dirstate
136 137
137 138
138 139 Test that we don't lose the setgid bit when we call chmod.
139 140 Not all systems support setgid directories (e.g. HFS+), so
140 141 just check that directories have the same mode.
141 142
142 143 $ cd ..
143 144 $ hg init setgid
144 145 $ cd setgid
145 146 $ chmod g+rwx .hg/store
146 147 $ chmod g+s .hg/store 2> /dev/null || true
147 148 $ mkdir dir
148 149 $ touch dir/file
149 150 $ hg ci -qAm 'add dir/file'
150 151 $ storemode=`python ../mode.py .hg/store`
151 152 $ dirmode=`python ../mode.py .hg/store/data/dir`
152 153 $ if [ "$storemode" != "$dirmode" ]; then
153 154 > echo "$storemode != $dirmode"
154 155 > fi
155 156 $ cd ..
156 157
157 158 $ cd .. # g-s dir