commit: avoid match.files() in conditions...
Martin von Zweigbergk
r25274:14408524 default
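The substantive change in this revision is the one-line condition at line 1447 of localrepo.py: a truthiness test on match.files() is replaced by the explicit match.isexact() or match.prefix() predicates when deciding whether explicit commit patterns need to be verified. Below is a minimal, self-contained sketch of why the two tests can disagree; the toymatch class is hypothetical and far simpler than Mercurial's real matcher.

# Hypothetical stand-in for Mercurial's matcher API, for illustration only.
class toymatch(object):
    def __init__(self, files, exact=False, prefix=False):
        self._files = files      # explicit patterns supplied by the user
        self._exact = exact      # True: match exactly the listed files
        self._prefix = prefix    # True: listed paths are directory prefixes

    def files(self):
        return self._files

    def isexact(self):
        return self._exact

    def prefix(self):
        return self._prefix

# A matcher can report a non-empty files() list (pattern roots) without
# being an exact or prefix matcher, so 'if match.files()' and
# 'if match.isexact() or match.prefix()' are not interchangeable:
m = toymatch(['src'], exact=False, prefix=False)  # e.g. a glob rooted at src/
assert m.files() and not (m.isexact() or m.prefix())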
@@ -1,1938 +1,1938 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
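repofilecache relies on Python's descriptor protocol: by defining __get__, __set__ and __delete__, it redirects every access to the cached attribute to the single unfiltered repository, so a repo and all of its filtered views share one cache slot. A minimal sketch of that redirect-to-canonical-object pattern follows; the canonicalcache class and its unfiltered() contract are illustrative, not Mercurial's actual code.

# Sketch: a data descriptor that stores its value on obj.unfiltered().
class canonicalcache(object):
    def __init__(self, name):
        self.name = name

    def __get__(self, obj, type=None):
        if obj is None:
            return self                      # accessed on the class itself
        return obj.unfiltered().__dict__[self.name]

    def __set__(self, obj, value):
        obj.unfiltered().__dict__[self.name] = value

    def __delete__(self, obj):
        del obj.unfiltered().__dict__[self.name]

class repo(object):
    cache = canonicalcache('cache')
    def unfiltered(self):
        return self        # a filtered view would return its base repo here

r = repo()
r.cache = 42
assert r.cache == 42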
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception, exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced, exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in modules of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.vfs = scmutil.vfs(self.path)
218 218 self.opener = self.vfs
219 219 self.baseui = baseui
220 220 self.ui = baseui.copy()
221 221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 222 # A list of callbacks to shape the phase if no data were found.
223 223 # Callbacks are in the form: func(repo, roots) --> processed root.
224 224 # This list is to be filled by extensions during repo setup.
225 225 self._phasedefaults = []
226 226 try:
227 227 self.ui.readconfig(self.join("hgrc"), self.root)
228 228 extensions.loadall(self.ui)
229 229 except IOError:
230 230 pass
231 231
232 232 if self.featuresetupfuncs:
233 233 self.supported = set(self._basesupported) # use private copy
234 234 extmods = set(m.__name__ for n, m
235 235 in extensions.extensions(self.ui))
236 236 for setupfunc in self.featuresetupfuncs:
237 237 if setupfunc.__module__ in extmods:
238 238 setupfunc(self.ui, self.supported)
239 239 else:
240 240 self.supported = self._basesupported
241 241
242 242 if not self.vfs.isdir():
243 243 if create:
244 244 if not self.wvfs.exists():
245 245 self.wvfs.makedirs()
246 246 self.vfs.makedir(notindexed=True)
247 247 self.requirements.update(self._baserequirements(create))
248 248 if self.ui.configbool('format', 'usestore', True):
249 249 self.vfs.mkdir("store")
250 250 self.requirements.add("store")
251 251 if self.ui.configbool('format', 'usefncache', True):
252 252 self.requirements.add("fncache")
253 253 if self.ui.configbool('format', 'dotencode', True):
254 254 self.requirements.add('dotencode')
255 255 # create an invalid changelog
256 256 self.vfs.append(
257 257 "00changelog.i",
258 258 '\0\0\0\2' # represents revlogv2
259 259 ' dummy changelog to prevent using the old repo layout'
260 260 )
261 261 if self.ui.configbool('format', 'generaldelta', False):
262 262 self.requirements.add("generaldelta")
263 263 if self.ui.configbool('experimental', 'treemanifest', False):
264 264 self.requirements.add("treemanifest")
265 265 if self.ui.configbool('experimental', 'manifestv2', False):
266 266 self.requirements.add("manifestv2")
267 267 else:
268 268 raise error.RepoError(_("repository %s not found") % path)
269 269 elif create:
270 270 raise error.RepoError(_("repository %s already exists") % path)
271 271 else:
272 272 try:
273 273 self.requirements = scmutil.readrequires(
274 274 self.vfs, self.supported)
275 275 except IOError, inst:
276 276 if inst.errno != errno.ENOENT:
277 277 raise
278 278
279 279 self.sharedpath = self.path
280 280 try:
281 281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 282 realpath=True)
283 283 s = vfs.base
284 284 if not vfs.exists():
285 285 raise error.RepoError(
286 286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 287 self.sharedpath = s
288 288 except IOError, inst:
289 289 if inst.errno != errno.ENOENT:
290 290 raise
291 291
292 292 self.store = store.store(
293 293 self.requirements, self.sharedpath, scmutil.vfs)
294 294 self.spath = self.store.path
295 295 self.svfs = self.store.vfs
296 296 self.sopener = self.svfs
297 297 self.sjoin = self.store.join
298 298 self.vfs.createmode = self.store.createmode
299 299 self._applyopenerreqs()
300 300 if create:
301 301 self._writerequirements()
302 302
303 303
304 304 self._branchcaches = {}
305 305 self._revbranchcache = None
306 306 self.filterpats = {}
307 307 self._datafilters = {}
308 308 self._transref = self._lockref = self._wlockref = None
309 309
310 310 # A cache for various files under .hg/ that tracks file changes
311 311 # (used by the filecache decorator)
312 312 #
313 313 # Maps a property name to its util.filecacheentry
314 314 self._filecache = {}
315 315
316 316 # holds sets of revisions to be filtered
317 317 # should be cleared when something might have changed the filter value:
318 318 # - new changesets,
319 319 # - phase change,
320 320 # - new obsolescence marker,
321 321 # - working directory parent change,
322 322 # - bookmark changes
323 323 self.filteredrevcache = {}
324 324
325 325 # generic mapping between names and nodes
326 326 self.names = namespaces.namespaces()
327 327
328 328 def close(self):
329 329 self._writecaches()
330 330
331 331 def _writecaches(self):
332 332 if self._revbranchcache:
333 333 self._revbranchcache.write()
334 334
335 335 def _restrictcapabilities(self, caps):
336 336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 337 caps = set(caps)
338 338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 339 caps.add('bundle2=' + urllib.quote(capsblob))
340 340 return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 346 if chunkcachesize is not None:
347 347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 349 if maxchainlen is not None:
350 350 self.svfs.options['maxchainlen'] = maxchainlen
351 351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 352 if manifestcachesize is not None:
353 353 self.svfs.options['manifestcachesize'] = manifestcachesize
354 354
355 355 def _writerequirements(self):
356 356 scmutil.writerequires(self.vfs, self.requirements)
357 357
358 358 def _checknested(self, path):
359 359 """Determine if path is a legal nested repository."""
360 360 if not path.startswith(self.root):
361 361 return False
362 362 subpath = path[len(self.root) + 1:]
363 363 normsubpath = util.pconvert(subpath)
364 364
365 365 # XXX: Checking against the current working copy is wrong in
366 366 # the sense that it can reject things like
367 367 #
368 368 # $ hg cat -r 10 sub/x.txt
369 369 #
370 370 # if sub/ is no longer a subrepository in the working copy
371 371 # parent revision.
372 372 #
373 373 # However, it can of course also allow things that would have
374 374 # been rejected before, such as the above cat command if sub/
375 375 # is a subrepository now, but was a normal directory before.
376 376 # The old path auditor would have rejected by mistake since it
377 377 # panics when it sees sub/.hg/.
378 378 #
379 379 # All in all, checking against the working copy seems sensible
380 380 # since we want to prevent access to nested repositories on
381 381 # the filesystem *now*.
382 382 ctx = self[None]
383 383 parts = util.splitpath(subpath)
384 384 while parts:
385 385 prefix = '/'.join(parts)
386 386 if prefix in ctx.substate:
387 387 if prefix == normsubpath:
388 388 return True
389 389 else:
390 390 sub = ctx.sub(prefix)
391 391 return sub.checknested(subpath[len(prefix) + 1:])
392 392 else:
393 393 parts.pop()
394 394 return False
395 395
396 396 def peer(self):
397 397 return localpeer(self) # not cached to avoid reference cycle
398 398
399 399 def unfiltered(self):
400 400 """Return unfiltered version of the repository
401 401
402 402 Intended to be overwritten by filtered repo."""
403 403 return self
404 404
405 405 def filtered(self, name):
406 406 """Return a filtered version of a repository"""
407 407 # build a new class with the mixin and the current class
408 408 # (possibly subclass of the repo)
409 409 class proxycls(repoview.repoview, self.unfiltered().__class__):
410 410 pass
411 411 return proxycls(self, name)
412 412
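filtered() manufactures a class at runtime that places the repoview mixin ahead of the repo's own class (which may itself be a subclass) in the MRO, so view behaviour wins while everything else falls through. A toy version of the same idiom follows, under the assumption of a much simpler view that merely filters data; the real repoview delegates attribute access to the wrapped repo rather than re-running __init__.

class base(object):
    def __init__(self, items):
        self.items = items
    def data(self):
        return self.items

class evenview(object):
    """Mixin placed in front of the concrete class, like repoview above."""
    def data(self):
        return [n for n in super(evenview, self).data() if n % 2 == 0]

def filtered(obj):
    # build the class on the fly, the same shape as proxycls above
    proxycls = type('proxycls', (evenview, obj.__class__), {})
    return proxycls(obj.items)

assert filtered(base([1, 2, 3, 4])).data() == [2, 4]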
413 413 @repofilecache('bookmarks')
414 414 def _bookmarks(self):
415 415 return bookmarks.bmstore(self)
416 416
417 417 @repofilecache('bookmarks.current')
418 418 def _activebookmark(self):
419 419 return bookmarks.readactive(self)
420 420
421 421 def bookmarkheads(self, bookmark):
422 422 name = bookmark.split('@', 1)[0]
423 423 heads = []
424 424 for mark, n in self._bookmarks.iteritems():
425 425 if mark.split('@', 1)[0] == name:
426 426 heads.append(n)
427 427 return heads
428 428
429 429 @storecache('phaseroots')
430 430 def _phasecache(self):
431 431 return phases.phasecache(self, self._phasedefaults)
432 432
433 433 @storecache('obsstore')
434 434 def obsstore(self):
435 435 # read default format for new obsstore.
436 436 defaultformat = self.ui.configint('format', 'obsstore-version', None)
437 437 # rely on obsstore class default when possible.
438 438 kwargs = {}
439 439 if defaultformat is not None:
440 440 kwargs['defaultformat'] = defaultformat
441 441 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
442 442 store = obsolete.obsstore(self.svfs, readonly=readonly,
443 443 **kwargs)
444 444 if store and readonly:
445 445 self.ui.warn(
446 446 _('obsolete feature not enabled but %i markers found!\n')
447 447 % len(list(store)))
448 448 return store
449 449
450 450 @storecache('00changelog.i')
451 451 def changelog(self):
452 452 c = changelog.changelog(self.svfs)
453 453 if 'HG_PENDING' in os.environ:
454 454 p = os.environ['HG_PENDING']
455 455 if p.startswith(self.root):
456 456 c.readpending('00changelog.i.a')
457 457 return c
458 458
459 459 @storecache('00manifest.i')
460 460 def manifest(self):
461 461 return manifest.manifest(self.svfs)
462 462
463 463 def dirlog(self, dir):
464 464 return self.manifest.dirlog(dir)
465 465
466 466 @repofilecache('dirstate')
467 467 def dirstate(self):
468 468 warned = [0]
469 469 def validate(node):
470 470 try:
471 471 self.changelog.rev(node)
472 472 return node
473 473 except error.LookupError:
474 474 if not warned[0]:
475 475 warned[0] = True
476 476 self.ui.warn(_("warning: ignoring unknown"
477 477 " working parent %s!\n") % short(node))
478 478 return nullid
479 479
480 480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481 481
482 482 def __getitem__(self, changeid):
483 483 if changeid is None:
484 484 return context.workingctx(self)
485 485 if isinstance(changeid, slice):
486 486 return [context.changectx(self, i)
487 487 for i in xrange(*changeid.indices(len(self)))
488 488 if i not in self.changelog.filteredrevs]
489 489 return context.changectx(self, changeid)
490 490
491 491 def __contains__(self, changeid):
492 492 try:
493 493 self[changeid]
494 494 return True
495 495 except error.RepoLookupError:
496 496 return False
497 497
498 498 def __nonzero__(self):
499 499 return True
500 500
501 501 def __len__(self):
502 502 return len(self.changelog)
503 503
504 504 def __iter__(self):
505 505 return iter(self.changelog)
506 506
507 507 def revs(self, expr, *args):
508 508 '''Return a list of revisions matching the given revset'''
509 509 expr = revset.formatspec(expr, *args)
510 510 m = revset.match(None, expr)
511 511 return m(self)
512 512
513 513 def set(self, expr, *args):
514 514 '''
515 515 Yield a context for each matching revision, after doing arg
516 516 replacement via revset.formatspec
517 517 '''
518 518 for r in self.revs(expr, *args):
519 519 yield self[r]
520 520
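As a hedged usage sketch (assuming an already-opened repo object and Python 2, matching this file): revs() returns revision numbers, while set() yields full change contexts after formatspec substitutes the arguments.

# Hypothetical usage; 'repo' is an opened localrepository.
for ctx in repo.set('branch(%s) and not merge()', 'default'):
    print '%d: %s' % (ctx.rev(), ctx.description().splitlines()[0])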
521 521 def url(self):
522 522 return 'file:' + self.root
523 523
524 524 def hook(self, name, throw=False, **args):
525 525 """Call a hook, passing this repo instance.
526 526
527 527 This is a convenience method to aid invoking hooks. Extensions likely
528 528 won't call this unless they have registered a custom hook or are
529 529 replacing code that is expected to call a hook.
530 530 """
531 531 return hook.hook(self.ui, self, name, throw, **args)
532 532
533 533 @unfilteredmethod
534 534 def _tag(self, names, node, message, local, user, date, extra={},
535 535 editor=False):
536 536 if isinstance(names, str):
537 537 names = (names,)
538 538
539 539 branches = self.branchmap()
540 540 for name in names:
541 541 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 542 local=local)
543 543 if name in branches:
544 544 self.ui.warn(_("warning: tag %s conflicts with existing"
545 545 " branch name\n") % name)
546 546
547 547 def writetags(fp, names, munge, prevtags):
548 548 fp.seek(0, 2)
549 549 if prevtags and prevtags[-1] != '\n':
550 550 fp.write('\n')
551 551 for name in names:
552 552 if munge:
553 553 m = munge(name)
554 554 else:
555 555 m = name
556 556
557 557 if (self._tagscache.tagtypes and
558 558 name in self._tagscache.tagtypes):
559 559 old = self.tags().get(name, nullid)
560 560 fp.write('%s %s\n' % (hex(old), m))
561 561 fp.write('%s %s\n' % (hex(node), m))
562 562 fp.close()
563 563
564 564 prevtags = ''
565 565 if local:
566 566 try:
567 567 fp = self.vfs('localtags', 'r+')
568 568 except IOError:
569 569 fp = self.vfs('localtags', 'a')
570 570 else:
571 571 prevtags = fp.read()
572 572
573 573 # local tags are stored in the current charset
574 574 writetags(fp, names, None, prevtags)
575 575 for name in names:
576 576 self.hook('tag', node=hex(node), tag=name, local=local)
577 577 return
578 578
579 579 try:
580 580 fp = self.wfile('.hgtags', 'rb+')
581 581 except IOError, e:
582 582 if e.errno != errno.ENOENT:
583 583 raise
584 584 fp = self.wfile('.hgtags', 'ab')
585 585 else:
586 586 prevtags = fp.read()
587 587
588 588 # committed tags are stored in UTF-8
589 589 writetags(fp, names, encoding.fromlocal, prevtags)
590 590
591 591 fp.close()
592 592
593 593 self.invalidatecaches()
594 594
595 595 if '.hgtags' not in self.dirstate:
596 596 self[None].add(['.hgtags'])
597 597
598 598 m = matchmod.exact(self.root, '', ['.hgtags'])
599 599 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 600 editor=editor)
601 601
602 602 for name in names:
603 603 self.hook('tag', node=hex(node), tag=name, local=local)
604 604
605 605 return tagnode
606 606
607 607 def tag(self, names, node, message, local, user, date, editor=False):
608 608 '''tag a revision with one or more symbolic names.
609 609
610 610 names is a list of strings or, when adding a single tag, names may be a
611 611 string.
612 612
613 613 if local is True, the tags are stored in a per-repository file.
614 614 otherwise, they are stored in the .hgtags file, and a new
615 615 changeset is committed with the change.
616 616
617 617 keyword arguments:
618 618
619 619 local: whether to store tags in non-version-controlled file
620 620 (default False)
621 621
622 622 message: commit message to use if committing
623 623
624 624 user: name of user to use if committing
625 625
626 626 date: date tuple to use if committing'''
627 627
628 628 if not local:
629 629 m = matchmod.exact(self.root, '', ['.hgtags'])
630 630 if any(self.status(match=m, unknown=True, ignored=True)):
631 631 raise util.Abort(_('working copy of .hgtags is changed'),
632 632 hint=_('please commit .hgtags manually'))
633 633
634 634 self.tags() # instantiate the cache
635 635 self._tag(names, node, message, local, user, date, editor=editor)
636 636
637 637 @filteredpropertycache
638 638 def _tagscache(self):
639 639 '''Returns a tagscache object that contains various tags related
640 640 caches.'''
641 641
642 642 # This simplifies its cache management by having one decorated
643 643 # function (this one) and the rest simply fetch things from it.
644 644 class tagscache(object):
645 645 def __init__(self):
646 646 # These two define the set of tags for this repository. tags
647 647 # maps tag name to node; tagtypes maps tag name to 'global' or
648 648 # 'local'. (Global tags are defined by .hgtags across all
649 649 # heads, and local tags are defined in .hg/localtags.)
650 650 # They constitute the in-memory cache of tags.
651 651 self.tags = self.tagtypes = None
652 652
653 653 self.nodetagscache = self.tagslist = None
654 654
655 655 cache = tagscache()
656 656 cache.tags, cache.tagtypes = self._findtags()
657 657
658 658 return cache
659 659
660 660 def tags(self):
661 661 '''return a mapping of tag to node'''
662 662 t = {}
663 663 if self.changelog.filteredrevs:
664 664 tags, tt = self._findtags()
665 665 else:
666 666 tags = self._tagscache.tags
667 667 for k, v in tags.iteritems():
668 668 try:
669 669 # ignore tags to unknown nodes
670 670 self.changelog.rev(v)
671 671 t[k] = v
672 672 except (error.LookupError, ValueError):
673 673 pass
674 674 return t
675 675
676 676 def _findtags(self):
677 677 '''Do the hard work of finding tags. Return a pair of dicts
678 678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 679 maps tag name to a string like \'global\' or \'local\'.
680 680 Subclasses or extensions are free to add their own tags, but
681 681 should be aware that the returned dicts will be retained for the
682 682 duration of the localrepo object.'''
683 683
684 684 # XXX what tagtype should subclasses/extensions use? Currently
685 685 # mq and bookmarks add tags, but do not set the tagtype at all.
686 686 # Should each extension invent its own tag type? Should there
687 687 # be one tagtype for all such "virtual" tags? Or is the status
688 688 # quo fine?
689 689
690 690 alltags = {} # map tag name to (node, hist)
691 691 tagtypes = {}
692 692
693 693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695 695
696 696 # Build the return dicts. Have to re-encode tag names because
697 697 # the tags module always uses UTF-8 (in order not to lose info
698 698 # writing to the cache), but the rest of Mercurial wants them in
699 699 # local encoding.
700 700 tags = {}
701 701 for (name, (node, hist)) in alltags.iteritems():
702 702 if node != nullid:
703 703 tags[encoding.tolocal(name)] = node
704 704 tags['tip'] = self.changelog.tip()
705 705 tagtypes = dict([(encoding.tolocal(name), value)
706 706 for (name, value) in tagtypes.iteritems()])
707 707 return (tags, tagtypes)
708 708
709 709 def tagtype(self, tagname):
710 710 '''
711 711 return the type of the given tag. result can be:
712 712
713 713 'local' : a local tag
714 714 'global' : a global tag
715 715 None : tag does not exist
716 716 '''
717 717
718 718 return self._tagscache.tagtypes.get(tagname)
719 719
720 720 def tagslist(self):
721 721 '''return a list of tags ordered by revision'''
722 722 if not self._tagscache.tagslist:
723 723 l = []
724 724 for t, n in self.tags().iteritems():
725 725 l.append((self.changelog.rev(n), t, n))
726 726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727 727
728 728 return self._tagscache.tagslist
729 729
730 730 def nodetags(self, node):
731 731 '''return the tags associated with a node'''
732 732 if not self._tagscache.nodetagscache:
733 733 nodetagscache = {}
734 734 for t, n in self._tagscache.tags.iteritems():
735 735 nodetagscache.setdefault(n, []).append(t)
736 736 for tags in nodetagscache.itervalues():
737 737 tags.sort()
738 738 self._tagscache.nodetagscache = nodetagscache
739 739 return self._tagscache.nodetagscache.get(node, [])
740 740
741 741 def nodebookmarks(self, node):
742 742 marks = []
743 743 for bookmark, n in self._bookmarks.iteritems():
744 744 if n == node:
745 745 marks.append(bookmark)
746 746 return sorted(marks)
747 747
748 748 def branchmap(self):
749 749 '''returns a dictionary {branch: [branchheads]} with branchheads
750 750 ordered by increasing revision number'''
751 751 branchmap.updatecache(self)
752 752 return self._branchcaches[self.filtername]
753 753
754 754 @unfilteredmethod
755 755 def revbranchcache(self):
756 756 if not self._revbranchcache:
757 757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 758 return self._revbranchcache
759 759
760 760 def branchtip(self, branch, ignoremissing=False):
761 761 '''return the tip node for a given branch
762 762
763 763 If ignoremissing is True, then this method will not raise an error.
764 764 This is helpful for callers that only expect None for a missing branch
765 765 (e.g. namespace).
766 766
767 767 '''
768 768 try:
769 769 return self.branchmap().branchtip(branch)
770 770 except KeyError:
771 771 if not ignoremissing:
772 772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 773 else:
774 774 pass
775 775
776 776 def lookup(self, key):
777 777 return self[key].node()
778 778
779 779 def lookupbranch(self, key, remote=None):
780 780 repo = remote or self
781 781 if key in repo.branchmap():
782 782 return key
783 783
784 784 repo = (remote and remote.local()) and remote or self
785 785 return repo[key].branch()
786 786
787 787 def known(self, nodes):
788 788 nm = self.changelog.nodemap
789 789 pc = self._phasecache
790 790 result = []
791 791 for n in nodes:
792 792 r = nm.get(n)
793 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 794 result.append(resp)
795 795 return result
796 796
797 797 def local(self):
798 798 return self
799 799
800 800 def cancopy(self):
801 801 # so statichttprepo's override of local() works
802 802 if not self.local():
803 803 return False
804 804 if not self.ui.configbool('phases', 'publish', True):
805 805 return True
806 806 # if publishing we can't copy if there is filtered content
807 807 return not self.filtered('visible').changelog.filteredrevs
808 808
809 809 def shared(self):
810 810 '''the type of shared repository (None if not shared)'''
811 811 if self.sharedpath != self.path:
812 812 return 'store'
813 813 return None
814 814
815 815 def join(self, f, *insidef):
816 816 return self.vfs.join(os.path.join(f, *insidef))
817 817
818 818 def wjoin(self, f, *insidef):
819 819 return self.vfs.reljoin(self.root, f, *insidef)
820 820
821 821 def file(self, f):
822 822 if f[0] == '/':
823 823 f = f[1:]
824 824 return filelog.filelog(self.svfs, f)
825 825
826 826 def changectx(self, changeid):
827 827 return self[changeid]
828 828
829 829 def parents(self, changeid=None):
830 830 '''get list of changectxs for parents of changeid'''
831 831 return self[changeid].parents()
832 832
833 833 def setparents(self, p1, p2=nullid):
834 834 self.dirstate.beginparentchange()
835 835 copies = self.dirstate.setparents(p1, p2)
836 836 pctx = self[p1]
837 837 if copies:
838 838 # Adjust copy records; the dirstate cannot do it, as it
839 839 # requires access to the parents' manifests. Preserve them
840 840 # only for entries added to the first parent.
841 841 for f in copies:
842 842 if f not in pctx and copies[f] in pctx:
843 843 self.dirstate.copy(copies[f], f)
844 844 if p2 == nullid:
845 845 for f, s in sorted(self.dirstate.copies().items()):
846 846 if f not in pctx and s not in pctx:
847 847 self.dirstate.copy(None, f)
848 848 self.dirstate.endparentchange()
849 849
850 850 def filectx(self, path, changeid=None, fileid=None):
851 851 """changeid can be a changeset revision, node, or tag.
852 852 fileid can be a file revision or node."""
853 853 return context.filectx(self, path, changeid, fileid)
854 854
855 855 def getcwd(self):
856 856 return self.dirstate.getcwd()
857 857
858 858 def pathto(self, f, cwd=None):
859 859 return self.dirstate.pathto(f, cwd)
860 860
861 861 def wfile(self, f, mode='r'):
862 862 return self.wvfs(f, mode)
863 863
864 864 def _link(self, f):
865 865 return self.wvfs.islink(f)
866 866
867 867 def _loadfilter(self, filter):
868 868 if filter not in self.filterpats:
869 869 l = []
870 870 for pat, cmd in self.ui.configitems(filter):
871 871 if cmd == '!':
872 872 continue
873 873 mf = matchmod.match(self.root, '', [pat])
874 874 fn = None
875 875 params = cmd
876 876 for name, filterfn in self._datafilters.iteritems():
877 877 if cmd.startswith(name):
878 878 fn = filterfn
879 879 params = cmd[len(name):].lstrip()
880 880 break
881 881 if not fn:
882 882 fn = lambda s, c, **kwargs: util.filter(s, c)
883 883 # Wrap old filters not supporting keyword arguments
884 884 if not inspect.getargspec(fn)[2]:
885 885 oldfn = fn
886 886 fn = lambda s, c, **kwargs: oldfn(s, c)
887 887 l.append((mf, fn, params))
888 888 self.filterpats[filter] = l
889 889 return self.filterpats[filter]
890 890
891 891 def _filter(self, filterpats, filename, data):
892 892 for mf, fn, cmd in filterpats:
893 893 if mf(filename):
894 894 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
895 895 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
896 896 break
897 897
898 898 return data
899 899
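_loadfilter() turns [encode]/[decode] config sections into (pattern, function, command) triples, and _filter() above applies the first pattern that matches, then stops. The same first-match-wins shape in a self-contained sketch; the patterns and filter functions here are made up.

import re

filterpats = [
    (re.compile(r'.*\.txt$').match, lambda data, cmd: data.replace('\r\n', '\n'), 'eol'),
    (re.compile(r'.*').match, lambda data, cmd: data, 'noop'),
]

def runfilters(filename, data):
    for mf, fn, cmd in filterpats:
        if mf(filename):          # first matching pattern wins, like _filter()
            return fn(data, cmd)
    return data

assert runfilters('a.txt', 'x\r\ny') == 'x\ny'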
900 900 @unfilteredpropertycache
901 901 def _encodefilterpats(self):
902 902 return self._loadfilter('encode')
903 903
904 904 @unfilteredpropertycache
905 905 def _decodefilterpats(self):
906 906 return self._loadfilter('decode')
907 907
908 908 def adddatafilter(self, name, filter):
909 909 self._datafilters[name] = filter
910 910
911 911 def wread(self, filename):
912 912 if self._link(filename):
913 913 data = self.wvfs.readlink(filename)
914 914 else:
915 915 data = self.wvfs.read(filename)
916 916 return self._filter(self._encodefilterpats, filename, data)
917 917
918 918 def wwrite(self, filename, data, flags):
919 919 """write ``data`` into ``filename`` in the working directory
920 920
921 921 This returns the length of the written (possibly decoded) data.
922 922 """
923 923 data = self._filter(self._decodefilterpats, filename, data)
924 924 if 'l' in flags:
925 925 self.wvfs.symlink(data, filename)
926 926 else:
927 927 self.wvfs.write(filename, data)
928 928 if 'x' in flags:
929 929 self.wvfs.setflags(filename, False, True)
930 930 return len(data)
931 931
932 932 def wwritedata(self, filename, data):
933 933 return self._filter(self._decodefilterpats, filename, data)
934 934
935 935 def currenttransaction(self):
936 936 """return the current transaction or None if non exists"""
937 937 if self._transref:
938 938 tr = self._transref()
939 939 else:
940 940 tr = None
941 941
942 942 if tr and tr.running():
943 943 return tr
944 944 return None
945 945
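currenttransaction() above dereferences the weak reference that transaction() (below) stores in self._transref, so the repository never keeps a finished transaction alive and the repo/transaction reference cycle noted further down is avoided. The behaviour of weakref.ref in one sketch; the txn class is a stand-in.

import weakref

class txn(object):
    def running(self):
        return True

t = txn()
ref = weakref.ref(t)       # does not keep t alive
assert ref() is t          # dereference while the object exists
del t
assert ref() is None       # once the object is gone, ref() returns None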
946 946 def transaction(self, desc, report=None):
947 947 if (self.ui.configbool('devel', 'all')
948 948 or self.ui.configbool('devel', 'check-locks')):
949 949 l = self._lockref and self._lockref()
950 950 if l is None or not l.held:
951 951 scmutil.develwarn(self.ui, 'transaction with no lock')
952 952 tr = self.currenttransaction()
953 953 if tr is not None:
954 954 return tr.nest()
955 955
956 956 # abort here if the journal already exists
957 957 if self.svfs.exists("journal"):
958 958 raise error.RepoError(
959 959 _("abandoned transaction found"),
960 960 hint=_("run 'hg recover' to clean up transaction"))
961 961
962 962 idbase = "%.40f#%f" % (random.random(), time.time())
963 963 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
964 964 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
965 965
966 966 self._writejournal(desc)
967 967 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
968 968 if report:
969 969 rp = report
970 970 else:
971 971 rp = self.ui.warn
972 972 vfsmap = {'plain': self.vfs} # root of .hg/
973 973 # we must avoid cyclic reference between repo and transaction.
974 974 reporef = weakref.ref(self)
975 975 def validate(tr):
976 976 """will run pre-closing hooks"""
977 977 pending = lambda: tr.writepending() and self.root or ""
978 978 reporef().hook('pretxnclose', throw=True, pending=pending,
979 979 txnname=desc, **tr.hookargs)
980 980
981 981 tr = transaction.transaction(rp, self.sopener, vfsmap,
982 982 "journal",
983 983 "undo",
984 984 aftertrans(renames),
985 985 self.store.createmode,
986 986 validator=validate)
987 987
988 988 tr.hookargs['txnid'] = txnid
989 989 # note: writing the fncache only during finalize means that the file is
990 990 # outdated when running hooks. As fncache is used for streaming clone,
991 991 # this is not expected to break anything that happens during the hooks.
992 992 tr.addfinalize('flush-fncache', self.store.write)
993 993 def txnclosehook(tr2):
994 994 """To be run if transaction is successful, will schedule a hook run
995 995 """
996 996 def hook():
997 997 reporef().hook('txnclose', throw=False, txnname=desc,
998 998 **tr2.hookargs)
999 999 reporef()._afterlock(hook)
1000 1000 tr.addfinalize('txnclose-hook', txnclosehook)
1001 1001 def txnaborthook(tr2):
1002 1002 """To be run if transaction is aborted
1003 1003 """
1004 1004 reporef().hook('txnabort', throw=False, txnname=desc,
1005 1005 **tr2.hookargs)
1006 1006 tr.addabort('txnabort-hook', txnaborthook)
1007 1007 self._transref = weakref.ref(tr)
1008 1008 return tr
1009 1009
1010 1010 def _journalfiles(self):
1011 1011 return ((self.svfs, 'journal'),
1012 1012 (self.vfs, 'journal.dirstate'),
1013 1013 (self.vfs, 'journal.branch'),
1014 1014 (self.vfs, 'journal.desc'),
1015 1015 (self.vfs, 'journal.bookmarks'),
1016 1016 (self.svfs, 'journal.phaseroots'))
1017 1017
1018 1018 def undofiles(self):
1019 1019 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1020 1020
1021 1021 def _writejournal(self, desc):
1022 1022 self.vfs.write("journal.dirstate",
1023 1023 self.vfs.tryread("dirstate"))
1024 1024 self.vfs.write("journal.branch",
1025 1025 encoding.fromlocal(self.dirstate.branch()))
1026 1026 self.vfs.write("journal.desc",
1027 1027 "%d\n%s\n" % (len(self), desc))
1028 1028 self.vfs.write("journal.bookmarks",
1029 1029 self.vfs.tryread("bookmarks"))
1030 1030 self.svfs.write("journal.phaseroots",
1031 1031 self.svfs.tryread("phaseroots"))
1032 1032
1033 1033 def recover(self):
1034 1034 lock = self.lock()
1035 1035 try:
1036 1036 if self.svfs.exists("journal"):
1037 1037 self.ui.status(_("rolling back interrupted transaction\n"))
1038 1038 vfsmap = {'': self.svfs,
1039 1039 'plain': self.vfs,}
1040 1040 transaction.rollback(self.svfs, vfsmap, "journal",
1041 1041 self.ui.warn)
1042 1042 self.invalidate()
1043 1043 return True
1044 1044 else:
1045 1045 self.ui.warn(_("no interrupted transaction available\n"))
1046 1046 return False
1047 1047 finally:
1048 1048 lock.release()
1049 1049
1050 1050 def rollback(self, dryrun=False, force=False):
1051 1051 wlock = lock = None
1052 1052 try:
1053 1053 wlock = self.wlock()
1054 1054 lock = self.lock()
1055 1055 if self.svfs.exists("undo"):
1056 1056 return self._rollback(dryrun, force)
1057 1057 else:
1058 1058 self.ui.warn(_("no rollback information available\n"))
1059 1059 return 1
1060 1060 finally:
1061 1061 release(lock, wlock)
1062 1062
1063 1063 @unfilteredmethod # Until we get smarter cache management
1064 1064 def _rollback(self, dryrun, force):
1065 1065 ui = self.ui
1066 1066 try:
1067 1067 args = self.vfs.read('undo.desc').splitlines()
1068 1068 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1069 1069 if len(args) >= 3:
1070 1070 detail = args[2]
1071 1071 oldtip = oldlen - 1
1072 1072
1073 1073 if detail and ui.verbose:
1074 1074 msg = (_('repository tip rolled back to revision %s'
1075 1075 ' (undo %s: %s)\n')
1076 1076 % (oldtip, desc, detail))
1077 1077 else:
1078 1078 msg = (_('repository tip rolled back to revision %s'
1079 1079 ' (undo %s)\n')
1080 1080 % (oldtip, desc))
1081 1081 except IOError:
1082 1082 msg = _('rolling back unknown transaction\n')
1083 1083 desc = None
1084 1084
1085 1085 if not force and self['.'] != self['tip'] and desc == 'commit':
1086 1086 raise util.Abort(
1087 1087 _('rollback of last commit while not checked out '
1088 1088 'may lose data'), hint=_('use -f to force'))
1089 1089
1090 1090 ui.status(msg)
1091 1091 if dryrun:
1092 1092 return 0
1093 1093
1094 1094 parents = self.dirstate.parents()
1095 1095 self.destroying()
1096 1096 vfsmap = {'plain': self.vfs, '': self.svfs}
1097 1097 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1098 1098 if self.vfs.exists('undo.bookmarks'):
1099 1099 self.vfs.rename('undo.bookmarks', 'bookmarks')
1100 1100 if self.svfs.exists('undo.phaseroots'):
1101 1101 self.svfs.rename('undo.phaseroots', 'phaseroots')
1102 1102 self.invalidate()
1103 1103
1104 1104 parentgone = (parents[0] not in self.changelog.nodemap or
1105 1105 parents[1] not in self.changelog.nodemap)
1106 1106 if parentgone:
1107 1107 self.vfs.rename('undo.dirstate', 'dirstate')
1108 1108 try:
1109 1109 branch = self.vfs.read('undo.branch')
1110 1110 self.dirstate.setbranch(encoding.tolocal(branch))
1111 1111 except IOError:
1112 1112 ui.warn(_('named branch could not be reset: '
1113 1113 'current branch is still \'%s\'\n')
1114 1114 % self.dirstate.branch())
1115 1115
1116 1116 self.dirstate.invalidate()
1117 1117 parents = tuple([p.rev() for p in self.parents()])
1118 1118 if len(parents) > 1:
1119 1119 ui.status(_('working directory now based on '
1120 1120 'revisions %d and %d\n') % parents)
1121 1121 else:
1122 1122 ui.status(_('working directory now based on '
1123 1123 'revision %d\n') % parents)
1124 1124 ms = mergemod.mergestate(self)
1125 1125 ms.reset(self['.'].node())
1126 1126
1127 1127 # TODO: if we know which new heads may result from this rollback, pass
1128 1128 # them to destroy(), which will prevent the branchhead cache from being
1129 1129 # invalidated.
1130 1130 self.destroyed()
1131 1131 return 0
1132 1132
1133 1133 def invalidatecaches(self):
1134 1134
1135 1135 if '_tagscache' in vars(self):
1136 1136 # can't use delattr on proxy
1137 1137 del self.__dict__['_tagscache']
1138 1138
1139 1139 self.unfiltered()._branchcaches.clear()
1140 1140 self.invalidatevolatilesets()
1141 1141
1142 1142 def invalidatevolatilesets(self):
1143 1143 self.filteredrevcache.clear()
1144 1144 obsolete.clearobscaches(self)
1145 1145
1146 1146 def invalidatedirstate(self):
1147 1147 '''Invalidates the dirstate, causing the next call to dirstate
1148 1148 to check if it was modified since the last time it was read,
1149 1149 rereading it if it has.
1150 1150
1151 1151 This is different from dirstate.invalidate() in that it doesn't always
1152 1152 reread the dirstate. Use dirstate.invalidate() if you want to
1153 1153 explicitly read the dirstate again (i.e. restoring it to a previous
1154 1154 known good state).'''
1155 1155 if hasunfilteredcache(self, 'dirstate'):
1156 1156 for k in self.dirstate._filecache:
1157 1157 try:
1158 1158 delattr(self.dirstate, k)
1159 1159 except AttributeError:
1160 1160 pass
1161 1161 delattr(self.unfiltered(), 'dirstate')
1162 1162
1163 1163 def invalidate(self):
1164 1164 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1165 1165 for k in self._filecache:
1166 1166 # dirstate is invalidated separately in invalidatedirstate()
1167 1167 if k == 'dirstate':
1168 1168 continue
1169 1169
1170 1170 try:
1171 1171 delattr(unfiltered, k)
1172 1172 except AttributeError:
1173 1173 pass
1174 1174 self.invalidatecaches()
1175 1175 self.store.invalidatecaches()
1176 1176
1177 1177 def invalidateall(self):
1178 1178 '''Fully invalidates both store and non-store parts, causing the
1179 1179 subsequent operation to reread any outside changes.'''
1180 1180 # extension should hook this to invalidate its caches
1181 1181 self.invalidate()
1182 1182 self.invalidatedirstate()
1183 1183
1184 1184 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1185 1185 try:
1186 1186 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1187 1187 except error.LockHeld, inst:
1188 1188 if not wait:
1189 1189 raise
1190 1190 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1191 1191 (desc, inst.locker))
1192 1192 # default to 600 seconds timeout
1193 1193 l = lockmod.lock(vfs, lockname,
1194 1194 int(self.ui.config("ui", "timeout", "600")),
1195 1195 releasefn, desc=desc)
1196 1196 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1197 1197 if acquirefn:
1198 1198 acquirefn()
1199 1199 return l
1200 1200
1201 1201 def _afterlock(self, callback):
1202 1202 """add a callback to be run when the repository is fully unlocked
1203 1203
1204 1204 The callback will be executed when the outermost lock is released
1205 1205 (with wlock being higher level than 'lock')."""
1206 1206 for ref in (self._wlockref, self._lockref):
1207 1207 l = ref and ref()
1208 1208 if l and l.held:
1209 1209 l.postrelease.append(callback)
1210 1210 break
1211 1211 else: # no lock has been found.
1212 1212 callback()
1213 1213
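_afterlock() above relies on Python's for/else: the else branch runs only when the loop ends without hitting break, i.e. when no held lock was found. A tiny sketch of the construct:

def firstheld(locks):
    for l in locks:
        if l is not None:
            break
    else:
        return None        # loop finished without break: nothing was held
    return l

assert firstheld([None, None]) is None
assert firstheld([None, 'lock']) == 'lock'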
1214 1214 def lock(self, wait=True):
1215 1215 '''Lock the repository store (.hg/store) and return a weak reference
1216 1216 to the lock. Use this before modifying the store (e.g. committing or
1217 1217 stripping). If you are opening a transaction, get a lock as well.
1218 1218
1219 1219 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1220 1220 'wlock' first to avoid a deadlock hazard.'''
1221 1221 l = self._lockref and self._lockref()
1222 1222 if l is not None and l.held:
1223 1223 l.lock()
1224 1224 return l
1225 1225
1226 1226 def unlock():
1227 1227 for k, ce in self._filecache.items():
1228 1228 if k == 'dirstate' or k not in self.__dict__:
1229 1229 continue
1230 1230 ce.refresh()
1231 1231
1232 1232 l = self._lock(self.svfs, "lock", wait, unlock,
1233 1233 self.invalidate, _('repository %s') % self.origroot)
1234 1234 self._lockref = weakref.ref(l)
1235 1235 return l
1236 1236
1237 1237 def wlock(self, wait=True):
1238 1238 '''Lock the non-store parts of the repository (everything under
1239 1239 .hg except .hg/store) and return a weak reference to the lock.
1240 1240
1241 1241 Use this before modifying files in .hg.
1242 1242
1243 1243 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1244 1244 'wlock' first to avoid a deadlock hazard.'''
1245 1245 l = self._wlockref and self._wlockref()
1246 1246 if l is not None and l.held:
1247 1247 l.lock()
1248 1248 return l
1249 1249
1250 1250 # We do not need to check for non-waiting lock acquisition. Such
1251 1251 # acquisition would not cause a deadlock, as it would just fail.
1252 1252 if wait and (self.ui.configbool('devel', 'all')
1253 1253 or self.ui.configbool('devel', 'check-locks')):
1254 1254 l = self._lockref and self._lockref()
1255 1255 if l is not None and l.held:
1256 1256 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1257 1257
1258 1258 def unlock():
1259 1259 if self.dirstate.pendingparentchange():
1260 1260 self.dirstate.invalidate()
1261 1261 else:
1262 1262 self.dirstate.write()
1263 1263
1264 1264 self._filecache['dirstate'].refresh()
1265 1265
1266 1266 l = self._lock(self.vfs, "wlock", wait, unlock,
1267 1267 self.invalidatedirstate, _('working directory of %s') %
1268 1268 self.origroot)
1269 1269 self._wlockref = weakref.ref(l)
1270 1270 return l
1271 1271
1272 1272 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1273 1273 """
1274 1274 commit an individual file as part of a larger transaction
1275 1275 """
1276 1276
1277 1277 fname = fctx.path()
1278 1278 fparent1 = manifest1.get(fname, nullid)
1279 1279 fparent2 = manifest2.get(fname, nullid)
1280 1280 if isinstance(fctx, context.filectx):
1281 1281 node = fctx.filenode()
1282 1282 if node in [fparent1, fparent2]:
1283 1283 self.ui.debug('reusing %s filelog entry\n' % fname)
1284 1284 return node
1285 1285
1286 1286 flog = self.file(fname)
1287 1287 meta = {}
1288 1288 copy = fctx.renamed()
1289 1289 if copy and copy[0] != fname:
1290 1290 # Mark the new revision of this file as a copy of another
1291 1291 # file. This copy data will effectively act as a parent
1292 1292 # of this new revision. If this is a merge, the first
1293 1293 # parent will be the nullid (meaning "look up the copy data")
1294 1294 # and the second one will be the other parent. For example:
1295 1295 #
1296 1296 # 0 --- 1 --- 3 rev1 changes file foo
1297 1297 # \ / rev2 renames foo to bar and changes it
1298 1298 # \- 2 -/ rev3 should have bar with all changes and
1299 1299 # should record that bar descends from
1300 1300 # bar in rev2 and foo in rev1
1301 1301 #
1302 1302 # this allows this merge to succeed:
1303 1303 #
1304 1304 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1305 1305 # \ / merging rev3 and rev4 should use bar@rev2
1306 1306 # \- 2 --- 4 as the merge base
1307 1307 #
1308 1308
1309 1309 cfname = copy[0]
1310 1310 crev = manifest1.get(cfname)
1311 1311 newfparent = fparent2
1312 1312
1313 1313 if manifest2: # branch merge
1314 1314 if fparent2 == nullid or crev is None: # copied on remote side
1315 1315 if cfname in manifest2:
1316 1316 crev = manifest2[cfname]
1317 1317 newfparent = fparent1
1318 1318
1319 1319 # Here, we used to search backwards through history to try to find
1320 1320 # where the file copy came from if the source of a copy was not in
1321 1321 # the parent directory. However, this doesn't actually make sense to
1322 1322 # do (what does a copy from something not in your working copy even
1323 1323 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1324 1324 # the user that copy information was dropped, so if they didn't
1325 1325 # expect this outcome it can be fixed, but this is the correct
1326 1326 # behavior in this circumstance.
1327 1327
1328 1328 if crev:
1329 1329 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1330 1330 meta["copy"] = cfname
1331 1331 meta["copyrev"] = hex(crev)
1332 1332 fparent1, fparent2 = nullid, newfparent
1333 1333 else:
1334 1334 self.ui.warn(_("warning: can't find ancestor for '%s' "
1335 1335 "copied from '%s'!\n") % (fname, cfname))
1336 1336
1337 1337 elif fparent1 == nullid:
1338 1338 fparent1, fparent2 = fparent2, nullid
1339 1339 elif fparent2 != nullid:
1340 1340 # is one parent an ancestor of the other?
1341 1341 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1342 1342 if fparent1 in fparentancestors:
1343 1343 fparent1, fparent2 = fparent2, nullid
1344 1344 elif fparent2 in fparentancestors:
1345 1345 fparent2 = nullid
1346 1346
1347 1347 # is the file changed?
1348 1348 text = fctx.data()
1349 1349 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1350 1350 changelist.append(fname)
1351 1351 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1352 1352 # are just the flags changed during merge?
1353 1353 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1354 1354 changelist.append(fname)
1355 1355
1356 1356 return fparent1
1357 1357
1358 1358 @unfilteredmethod
1359 1359 def commit(self, text="", user=None, date=None, match=None, force=False,
1360 1360 editor=False, extra={}):
1361 1361 """Add a new revision to current repository.
1362 1362
1363 1363 Revision information is gathered from the working directory,
1364 1364 match can be used to filter the committed files. If editor is
1365 1365 supplied, it is called to get a commit message.
1366 1366 """
1367 1367
1368 1368 def fail(f, msg):
1369 1369 raise util.Abort('%s: %s' % (f, msg))
1370 1370
1371 1371 if not match:
1372 1372 match = matchmod.always(self.root, '')
1373 1373
1374 1374 if not force:
1375 1375 vdirs = []
1376 1376 match.explicitdir = vdirs.append
1377 1377 match.bad = fail
1378 1378
1379 1379 wlock = self.wlock()
1380 1380 try:
1381 1381 wctx = self[None]
1382 1382 merge = len(wctx.parents()) > 1
1383 1383
1384 1384 if not force and merge and match.ispartial():
1385 1385 raise util.Abort(_('cannot partially commit a merge '
1386 1386 '(do not specify files or patterns)'))
1387 1387
1388 1388 status = self.status(match=match, clean=force)
1389 1389 if force:
1390 1390 status.modified.extend(status.clean) # mq may commit clean files
1391 1391
1392 1392 # check subrepos
1393 1393 subs = []
1394 1394 commitsubs = set()
1395 1395 newstate = wctx.substate.copy()
1396 1396 # only manage subrepos and .hgsubstate if .hgsub is present
1397 1397 if '.hgsub' in wctx:
1398 1398 # we'll decide whether to track this ourselves, thanks
1399 1399 for c in status.modified, status.added, status.removed:
1400 1400 if '.hgsubstate' in c:
1401 1401 c.remove('.hgsubstate')
1402 1402
1403 1403 # compare current state to last committed state
1404 1404 # build new substate based on last committed state
1405 1405 oldstate = wctx.p1().substate
1406 1406 for s in sorted(newstate.keys()):
1407 1407 if not match(s):
1408 1408 # ignore working copy, use old state if present
1409 1409 if s in oldstate:
1410 1410 newstate[s] = oldstate[s]
1411 1411 continue
1412 1412 if not force:
1413 1413 raise util.Abort(
1414 1414 _("commit with new subrepo %s excluded") % s)
1415 1415 dirtyreason = wctx.sub(s).dirtyreason(True)
1416 1416 if dirtyreason:
1417 1417 if not self.ui.configbool('ui', 'commitsubrepos'):
1418 1418 raise util.Abort(dirtyreason,
1419 1419 hint=_("use --subrepos for recursive commit"))
1420 1420 subs.append(s)
1421 1421 commitsubs.add(s)
1422 1422 else:
1423 1423 bs = wctx.sub(s).basestate()
1424 1424 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1425 1425 if oldstate.get(s, (None, None, None))[1] != bs:
1426 1426 subs.append(s)
1427 1427
1428 1428 # check for removed subrepos
1429 1429 for p in wctx.parents():
1430 1430 r = [s for s in p.substate if s not in newstate]
1431 1431 subs += [s for s in r if match(s)]
1432 1432 if subs:
1433 1433 if (not match('.hgsub') and
1434 1434 '.hgsub' in (wctx.modified() + wctx.added())):
1435 1435 raise util.Abort(
1436 1436 _("can't commit subrepos without .hgsub"))
1437 1437 status.modified.insert(0, '.hgsubstate')
1438 1438
1439 1439 elif '.hgsub' in status.removed:
1440 1440 # clean up .hgsubstate when .hgsub is removed
1441 1441 if ('.hgsubstate' in wctx and
1442 1442 '.hgsubstate' not in (status.modified + status.added +
1443 1443 status.removed)):
1444 1444 status.removed.insert(0, '.hgsubstate')
1445 1445
1446 1446 # make sure all explicit patterns are matched
1447 if not force and match.files():
1447 if not force and (match.isexact() or match.prefix()):
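            # files() is a list of explicit patterns; testing it for truth
            # conflated "has patterns" with "is an exact or prefix match".
            # The explicit predicates name the condition that matters here.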
1448 1448 matched = set(status.modified + status.added + status.removed)
1449 1449
1450 1450 for f in match.files():
1451 1451 f = self.dirstate.normalize(f)
1452 1452 if f == '.' or f in matched or f in wctx.substate:
1453 1453 continue
1454 1454 if f in status.deleted:
1455 1455 fail(f, _('file not found!'))
1456 1456 if f in vdirs: # visited directory
1457 1457 d = f + '/'
1458 1458 for mf in matched:
1459 1459 if mf.startswith(d):
1460 1460 break
1461 1461 else:
1462 1462 fail(f, _("no match under directory!"))
1463 1463 elif f not in self.dirstate:
1464 1464 fail(f, _("file not tracked!"))
1465 1465
1466 1466 cctx = context.workingcommitctx(self, status,
1467 1467 text, user, date, extra)
1468 1468
1469 1469 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1470 1470 or extra.get('close') or merge or cctx.files()
1471 1471 or self.ui.configbool('ui', 'allowemptycommit'))
1472 1472 if not allowemptycommit:
1473 1473 return None
1474 1474
1475 1475 if merge and cctx.deleted():
1476 1476 raise util.Abort(_("cannot commit merge with missing files"))
1477 1477
1478 1478 ms = mergemod.mergestate(self)
1479 1479 for f in status.modified:
1480 1480 if f in ms and ms[f] == 'u':
1481 1481 raise util.Abort(_('unresolved merge conflicts '
1482 1482 '(see "hg help resolve")'))
1483 1483
1484 1484 if editor:
1485 1485 cctx._text = editor(self, cctx, subs)
1486 1486 edited = (text != cctx._text)
1487 1487
1488 1488 # Save commit message in case this transaction gets rolled back
1489 1489 # (e.g. by a pretxncommit hook). Leave the content alone on
1490 1490 # the assumption that the user will use the same editor again.
1491 1491 msgfn = self.savecommitmessage(cctx._text)
1492 1492
1493 1493 # commit subs and write new state
1494 1494 if subs:
1495 1495 for s in sorted(commitsubs):
1496 1496 sub = wctx.sub(s)
1497 1497 self.ui.status(_('committing subrepository %s\n') %
1498 1498 subrepo.subrelpath(sub))
1499 1499 sr = sub.commit(cctx._text, user, date)
1500 1500 newstate[s] = (newstate[s][0], sr)
1501 1501 subrepo.writestate(self, newstate)
1502 1502
1503 1503 p1, p2 = self.dirstate.parents()
1504 1504 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1505 1505 try:
1506 1506 self.hook("precommit", throw=True, parent1=hookp1,
1507 1507 parent2=hookp2)
1508 1508 ret = self.commitctx(cctx, True)
1509 1509 except: # re-raises
1510 1510 if edited:
1511 1511 self.ui.write(
1512 1512 _('note: commit message saved in %s\n') % msgfn)
1513 1513 raise
1514 1514
1515 1515 # update bookmarks, dirstate and mergestate
1516 1516 bookmarks.update(self, [p1, p2], ret)
1517 1517 cctx.markcommitted(ret)
1518 1518 ms.reset()
1519 1519 finally:
1520 1520 wlock.release()
1521 1521
1522 1522 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1523 1523 # hack for commands that use a temporary commit (e.g. histedit):
1524 1524 # the temporary commit may have been stripped before the hook runs
1525 1525 if self.changelog.hasnode(ret):
1526 1526 self.hook("commit", node=node, parent1=parent1,
1527 1527 parent2=parent2)
1528 1528 self._afterlock(commithook)
1529 1529 return ret
1530 1530
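commithook() above freezes node, parent1 and parent2 as default arguments, which are evaluated once at definition time; a plain closure would instead see whatever the variables hold when the hook finally runs after the lock is released. The difference in a sketch:

def make_hooks():
    hooks = []
    for i in range(3):
        hooks.append(lambda i=i: i)    # i=i captures the current value
    return [h() for h in hooks]

assert make_hooks() == [0, 1, 2]       # late-binding lambdas would give [2, 2, 2]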
1531 1531 @unfilteredmethod
1532 1532 def commitctx(self, ctx, error=False):
1533 1533 """Add a new revision to current repository.
1534 1534 Revision information is passed via the context argument.
1535 1535 """
1536 1536
1537 1537 tr = None
1538 1538 p1, p2 = ctx.p1(), ctx.p2()
1539 1539 user = ctx.user()
1540 1540
1541 1541 lock = self.lock()
1542 1542 try:
1543 1543 tr = self.transaction("commit")
1544 1544 trp = weakref.proxy(tr)
1545 1545
1546 1546 if ctx.files():
1547 1547 m1 = p1.manifest()
1548 1548 m2 = p2.manifest()
1549 1549 m = m1.copy()
1550 1550
1551 1551 # check in files
1552 1552 added = []
1553 1553 changed = []
1554 1554 removed = list(ctx.removed())
1555 1555 linkrev = len(self)
1556 1556 self.ui.note(_("committing files:\n"))
1557 1557 for f in sorted(ctx.modified() + ctx.added()):
1558 1558 self.ui.note(f + "\n")
1559 1559 try:
1560 1560 fctx = ctx[f]
1561 1561 if fctx is None:
1562 1562 removed.append(f)
1563 1563 else:
1564 1564 added.append(f)
1565 1565 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1566 1566 trp, changed)
1567 1567 m.setflag(f, fctx.flags())
1568 1568 except OSError, inst:
1569 1569 self.ui.warn(_("trouble committing %s!\n") % f)
1570 1570 raise
1571 1571 except IOError, inst:
1572 1572 errcode = getattr(inst, 'errno', errno.ENOENT)
1573 1573 if error or errcode and errcode != errno.ENOENT:
1574 1574 self.ui.warn(_("trouble committing %s!\n") % f)
1575 1575 raise
1576 1576
1577 1577 # update manifest
1578 1578 self.ui.note(_("committing manifest\n"))
1579 1579 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1580 1580 drop = [f for f in removed if f in m]
1581 1581 for f in drop:
1582 1582 del m[f]
1583 1583 mn = self.manifest.add(m, trp, linkrev,
1584 1584 p1.manifestnode(), p2.manifestnode(),
1585 1585 added, drop)
1586 1586 files = changed + removed
1587 1587 else:
1588 1588 mn = p1.manifestnode()
1589 1589 files = []
1590 1590
1591 1591 # update changelog
1592 1592 self.ui.note(_("committing changelog\n"))
1593 1593 self.changelog.delayupdate(tr)
1594 1594 n = self.changelog.add(mn, files, ctx.description(),
1595 1595 trp, p1.node(), p2.node(),
1596 1596 user, ctx.date(), ctx.extra().copy())
1597 1597 p = lambda: tr.writepending() and self.root or ""
1598 1598 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1599 1599 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1600 1600 parent2=xp2, pending=p)
1601 1601 # set the new commit is proper phase
1602 1602 targetphase = subrepo.newcommitphase(self.ui, ctx)
1603 1603 if targetphase:
1604 1604 # retract boundary do not alter parent changeset.
1605 1605 # if a parent have higher the resulting phase will
1606 1606 # be compliant anyway
1607 1607 #
1608 1608 # if minimal phase was 0 we don't need to retract anything
1609 1609 phases.retractboundary(self, tr, targetphase, [n])
1610 1610 tr.close()
1611 1611 branchmap.updatecache(self.filtered('served'))
1612 1612 return n
1613 1613 finally:
1614 1614 if tr:
1615 1615 tr.release()
1616 1616 lock.release()
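
    # Illustrative sketch (added commentary, not in the upstream file):
    # commitctx() is also the entry point for in-memory commits; an
    # extension can build a context.memctx and commit it without touching
    # the working directory (this sketch assumes the memctx API of this
    # Mercurial era):
    #   ctx = context.memctx(repo, (p1, p2), 'message', ['a.txt'],
    #                        filectxfn, user='user@example.com')
    #   node = repo.commitctx(ctx)
    # where filectxfn(repo, memctx, path) returns a memfilectx per file.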

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
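
    # Illustrative sketch (added commentary, not in the upstream file):
    # with a matcher built from patterns, walk() yields the matching file
    # names in a changeset, e.g.:
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m, node='tip'):
    #       repo.ui.write(f + '\n')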

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
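
    # Illustrative sketch (added commentary, not in the upstream file):
    # branchheads() answers questions like "which heads does the 'default'
    # branch have, including closed ones?":
    #   heads = repo.branchheads('default', closed=True)
    # returning binary nodes, newest first, or [] for an unknown branch.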

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
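
    # Illustrative note (added commentary, not in the upstream file): for
    # each (top, bottom) pair, between() samples first-parent ancestors of
    # 'top' at power-of-two distances. With a linear history ...-r0-...-r10,
    # between([(r10, r0)]) returns [[r9, r8, r6, r2]] (distances 1, 2, 4
    # and 8 from the top); the legacy discovery protocol uses this to
    # bisect the gap between known and unknown nodes.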

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()
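
    # Illustrative sketch (hypothetical extension code, not in the upstream
    # file): an extension can register a pre-push check like so:
    #   def checkoutgoing(repo, remote, outgoing):
    #       if len(outgoing.missing) > 100:
    #           raise util.Abort(_('refusing to push more than 100 csets'))
    #   repo.prepushoutgoinghooks.add('myextension', checkoutgoing)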

    def stream_in(self, remote, remotereqs):
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))

        self.applystreamclone(remotereqs, rbranchmap, fp)
        return len(self.heads()) + 1

    def applystreamclone(self, remotereqs, remotebranchmap, fp):
        """Apply stream clone data to this repository.

        "remotereqs" is a set of requirements to handle the incoming data.
        "remotebranchmap" is the result of a branchmap lookup on the remote. It
        can be None.
        "fp" is a file object containing the raw stream data, suitable for
        feeding into exchange.consumestreamclone.
        """
        lock = self.lock()
        try:
            exchange.consumestreamclone(self, fp)

            # new requirements = old non-format requirements +
            #                    new format-related remote requirements,
            # i.e. the format requirements of the streamed-in repository
            self.requirements = remotereqs | (
                self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if remotebranchmap:
                rbheads = []
                closed = []
                for bheads in remotebranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(remotebranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
                pending = lambda: tr.writepending() and self.root or ""
                hookargs['pending'] = pending
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
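
    # Illustrative sketch (added commentary, not in the upstream file;
    # the bookmark name is hypothetical): pushkey() is how bookmark
    # updates arrive over the wire, e.g.:
    #   ok = repo.pushkey('bookmarks', 'mybook', '', hex(newnode))
    # 'old' and 'new' are string-encoded values; an empty 'old' means the
    # key is expected not to exist yet. Returns False if a hook aborts.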

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
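
    # Illustrative sketch (added commentary, not in the upstream file):
    # listkeys() returns a dict of string keys to string values, e.g.
    #   repo.listkeys('bookmarks')
    # maps bookmark names to hex changeset nodes.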

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
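
    # Illustrative note (added commentary, not in the upstream file): the
    # returned value is the repo-relative path of the saved message,
    # normally '.hg/last-message.txt', which callers show to the user when
    # a commit fails after the editor ran.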

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
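
# Illustrative sketch (added commentary, not in the upstream file; the
# exact call site lives in the transaction setup): aftertrans() is handed
# (vfs, src, dest) triples and returns a no-argument closure that performs
# the deferred renames, e.g.:
#   onclose = aftertrans([(repo.vfs, 'journal', 'undo')])
#   onclose()  # renames 'journal' to 'undo' once the transaction closes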

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
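
# Illustrative note (added commentary, not in the upstream file):
#   undoname('/repo/.hg/journal.dirstate') -> '/repo/.hg/undo.dirstate'
# pairing each journal file with the undo file used by 'hg rollback'.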

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True