localrepo: eliminate requirements class variable (API)...
Drew Gottlieb
r24913:e3a928bd default
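In effect, requirements stops being a class-level attribute with a
['revlogv1'] default on localrepository and becomes purely per-instance
state assigned by _applyrequirements(); the base default moves into
_baserequirements(). A minimal sketch of the observable difference
(hypothetical ui/path values; illustration only, not part of the diff):

    # Before this change: the default leaked through the class itself.
    localrepository.requirements        # ['revlogv1'], even with no instance

    # After this change: only instances carry requirements, as a set
    # built in __init__ and stored by _applyrequirements().
    repo = localrepository(ui, path)
    repo.requirements                   # set(['revlogv1', 'store', ...])
    localrepository.requirements        # AttributeError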
@@ -1,1972 +1,1971 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
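# Usage sketch (illustration only, mirroring decorators applied later in
# this file): repofilecache keys on a file under .hg/, while storecache
# keys on a file under .hg/store/ via the join() override above.
#
#   @repofilecache('bookmarks')     # invalidated when .hg/bookmarks changes
#   def _bookmarks(self): ...
#
#   @storecache('00changelog.i')    # invalidated when the store changelog changes
#   def changelog(self): ...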
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to keep
115 115 # the wire-level function happy. We need to build a proper object
116 116 # from it in the local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception, exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced, exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 197 'dotencode'))
198 198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 requirements = ['revlogv1']
200 199 filtername = None
201 200
202 201 # a list of (ui, featureset) functions.
203 202 # only functions defined in modules of enabled extensions are invoked
204 203 featuresetupfuncs = set()
205 204
206 205 def _baserequirements(self, create):
207 return self.requirements[:]
206 return ['revlogv1']
208 207
209 208 def __init__(self, baseui, path=None, create=False):
210 209 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 210 self.wopener = self.wvfs
212 211 self.root = self.wvfs.base
213 212 self.path = self.wvfs.join(".hg")
214 213 self.origroot = path
215 214 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 215 self.vfs = scmutil.vfs(self.path)
217 216 self.opener = self.vfs
218 217 self.baseui = baseui
219 218 self.ui = baseui.copy()
220 219 self.ui.copy = baseui.copy # prevent copying repo configuration
221 220 # A list of callbacks to shape the phase if no data are found.
222 221 # Callbacks are in the form: func(repo, roots) --> processed root.
223 222 # This list is to be filled by extensions during repo setup
224 223 self._phasedefaults = []
225 224 try:
226 225 self.ui.readconfig(self.join("hgrc"), self.root)
227 226 extensions.loadall(self.ui)
228 227 except IOError:
229 228 pass
230 229
231 230 if self.featuresetupfuncs:
232 231 self.supported = set(self._basesupported) # use private copy
233 232 extmods = set(m.__name__ for n, m
234 233 in extensions.extensions(self.ui))
235 234 for setupfunc in self.featuresetupfuncs:
236 235 if setupfunc.__module__ in extmods:
237 236 setupfunc(self.ui, self.supported)
238 237 else:
239 238 self.supported = self._basesupported
240 239
241 240 if not self.vfs.isdir():
242 241 if create:
243 242 if not self.wvfs.exists():
244 243 self.wvfs.makedirs()
245 244 self.vfs.makedir(notindexed=True)
246 245 requirements = self._baserequirements(create)
247 246 if self.ui.configbool('format', 'usestore', True):
248 247 self.vfs.mkdir("store")
249 248 requirements.append("store")
250 249 if self.ui.configbool('format', 'usefncache', True):
251 250 requirements.append("fncache")
252 251 if self.ui.configbool('format', 'dotencode', True):
253 252 requirements.append('dotencode')
254 253 # create an invalid changelog
255 254 self.vfs.append(
256 255 "00changelog.i",
257 256 '\0\0\0\2' # represents revlogv2
258 257 ' dummy changelog to prevent using the old repo layout'
259 258 )
260 259 if self.ui.configbool('format', 'generaldelta', False):
261 260 requirements.append("generaldelta")
262 261 if self.ui.configbool('experimental', 'manifestv2', False):
263 262 requirements.append("manifestv2")
264 263 requirements = set(requirements)
265 264 else:
266 265 raise error.RepoError(_("repository %s not found") % path)
267 266 elif create:
268 267 raise error.RepoError(_("repository %s already exists") % path)
269 268 else:
270 269 try:
271 270 requirements = scmutil.readrequires(self.vfs, self.supported)
272 271 except IOError, inst:
273 272 if inst.errno != errno.ENOENT:
274 273 raise
275 274 requirements = set()
276 275
277 276 self.sharedpath = self.path
278 277 try:
279 278 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
280 279 realpath=True)
281 280 s = vfs.base
282 281 if not vfs.exists():
283 282 raise error.RepoError(
284 283 _('.hg/sharedpath points to nonexistent directory %s') % s)
285 284 self.sharedpath = s
286 285 except IOError, inst:
287 286 if inst.errno != errno.ENOENT:
288 287 raise
289 288
290 289 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
291 290 self.spath = self.store.path
292 291 self.svfs = self.store.vfs
293 292 self.sopener = self.svfs
294 293 self.sjoin = self.store.join
295 294 self.vfs.createmode = self.store.createmode
296 295 self._applyrequirements(requirements)
297 296 if create:
298 297 self._writerequirements()
299 298
300 299
301 300 self._branchcaches = {}
302 301 self._revbranchcache = None
303 302 self.filterpats = {}
304 303 self._datafilters = {}
305 304 self._transref = self._lockref = self._wlockref = None
306 305
307 306 # A cache for various files under .hg/ that tracks file changes
308 307 # (used by the filecache decorator)
309 308 #
310 309 # Maps a property name to its util.filecacheentry
311 310 self._filecache = {}
312 311
313 312 # holds sets of revisions to be filtered
314 313 # should be cleared when something might have changed the filter value:
315 314 # - new changesets,
316 315 # - phase change,
317 316 # - new obsolescence marker,
318 317 # - working directory parent change,
319 318 # - bookmark changes
320 319 self.filteredrevcache = {}
321 320
322 321 # generic mapping between names and nodes
323 322 self.names = namespaces.namespaces()
324 323
325 324 def close(self):
326 325 self._writecaches()
327 326
328 327 def _writecaches(self):
329 328 if self._revbranchcache:
330 329 self._revbranchcache.write()
331 330
332 331 def _restrictcapabilities(self, caps):
333 332 if self.ui.configbool('experimental', 'bundle2-advertise', True):
334 333 caps = set(caps)
335 334 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
336 335 caps.add('bundle2=' + urllib.quote(capsblob))
337 336 return caps
338 337
339 338 def _applyrequirements(self, requirements):
340 339 self.requirements = requirements
341 340 self.svfs.options = dict((r, 1) for r in requirements
342 341 if r in self.openerreqs)
343 342 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
344 343 if chunkcachesize is not None:
345 344 self.svfs.options['chunkcachesize'] = chunkcachesize
346 345 maxchainlen = self.ui.configint('format', 'maxchainlen')
347 346 if maxchainlen is not None:
348 347 self.svfs.options['maxchainlen'] = maxchainlen
349 348 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
350 349 if manifestcachesize is not None:
351 350 self.svfs.options['manifestcachesize'] = manifestcachesize
352 351 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
353 352 if usetreemanifest is not None:
354 353 self.svfs.options['usetreemanifest'] = usetreemanifest
355 354
356 355 def _writerequirements(self):
357 356 reqfile = self.vfs("requires", "w")
358 357 for r in sorted(self.requirements):
359 358 reqfile.write("%s\n" % r)
360 359 reqfile.close()
361 360
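# For reference, the file written above is .hg/requires, one entry per
# line in sorted order. A typical result for a repo created with the
# default configuration handled in __init__ (sketch; config-dependent):
#
#   dotencode
#   fncache
#   revlogv1
#   store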
362 361 def _checknested(self, path):
363 362 """Determine if path is a legal nested repository."""
364 363 if not path.startswith(self.root):
365 364 return False
366 365 subpath = path[len(self.root) + 1:]
367 366 normsubpath = util.pconvert(subpath)
368 367
369 368 # XXX: Checking against the current working copy is wrong in
370 369 # the sense that it can reject things like
371 370 #
372 371 # $ hg cat -r 10 sub/x.txt
373 372 #
374 373 # if sub/ is no longer a subrepository in the working copy
375 374 # parent revision.
376 375 #
377 376 # However, it can of course also allow things that would have
378 377 # been rejected before, such as the above cat command if sub/
379 378 # is a subrepository now, but was a normal directory before.
380 379 # The old path auditor would have rejected by mistake since it
381 380 # panics when it sees sub/.hg/.
382 381 #
383 382 # All in all, checking against the working copy seems sensible
384 383 # since we want to prevent access to nested repositories on
385 384 # the filesystem *now*.
386 385 ctx = self[None]
387 386 parts = util.splitpath(subpath)
388 387 while parts:
389 388 prefix = '/'.join(parts)
390 389 if prefix in ctx.substate:
391 390 if prefix == normsubpath:
392 391 return True
393 392 else:
394 393 sub = ctx.sub(prefix)
395 394 return sub.checknested(subpath[len(prefix) + 1:])
396 395 else:
397 396 parts.pop()
398 397 return False
399 398
400 399 def peer(self):
401 400 return localpeer(self) # not cached to avoid reference cycle
402 401
403 402 def unfiltered(self):
404 403 """Return unfiltered version of the repository
405 404
406 405 Intended to be overwritten by filtered repo."""
407 406 return self
408 407
409 408 def filtered(self, name):
410 409 """Return a filtered version of a repository"""
411 410 # build a new class with the mixin and the current class
412 411 # (possibly subclass of the repo)
413 412 class proxycls(repoview.repoview, self.unfiltered().__class__):
414 413 pass
415 414 return proxycls(self, name)
416 415
417 416 @repofilecache('bookmarks')
418 417 def _bookmarks(self):
419 418 return bookmarks.bmstore(self)
420 419
421 420 @repofilecache('bookmarks.current')
422 421 def _bookmarkcurrent(self):
423 422 return bookmarks.readcurrent(self)
424 423
425 424 def bookmarkheads(self, bookmark):
426 425 name = bookmark.split('@', 1)[0]
427 426 heads = []
428 427 for mark, n in self._bookmarks.iteritems():
429 428 if mark.split('@', 1)[0] == name:
430 429 heads.append(n)
431 430 return heads
432 431
433 432 @storecache('phaseroots')
434 433 def _phasecache(self):
435 434 return phases.phasecache(self, self._phasedefaults)
436 435
437 436 @storecache('obsstore')
438 437 def obsstore(self):
439 438 # read default format for new obsstore.
440 439 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 440 # rely on obsstore class default when possible.
442 441 kwargs = {}
443 442 if defaultformat is not None:
444 443 kwargs['defaultformat'] = defaultformat
445 444 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 445 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 446 **kwargs)
448 447 if store and readonly:
449 448 self.ui.warn(
450 449 _('obsolete feature not enabled but %i markers found!\n')
451 450 % len(list(store)))
452 451 return store
453 452
454 453 @storecache('00changelog.i')
455 454 def changelog(self):
456 455 c = changelog.changelog(self.svfs)
457 456 if 'HG_PENDING' in os.environ:
458 457 p = os.environ['HG_PENDING']
459 458 if p.startswith(self.root):
460 459 c.readpending('00changelog.i.a')
461 460 return c
462 461
463 462 @storecache('00manifest.i')
464 463 def manifest(self):
465 464 return manifest.manifest(self.svfs)
466 465
467 466 @repofilecache('dirstate')
468 467 def dirstate(self):
469 468 warned = [0]
470 469 def validate(node):
471 470 try:
472 471 self.changelog.rev(node)
473 472 return node
474 473 except error.LookupError:
475 474 if not warned[0]:
476 475 warned[0] = True
477 476 self.ui.warn(_("warning: ignoring unknown"
478 477 " working parent %s!\n") % short(node))
479 478 return nullid
480 479
481 480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482 481
483 482 def __getitem__(self, changeid):
484 483 if changeid is None:
485 484 return context.workingctx(self)
486 485 if isinstance(changeid, slice):
487 486 return [context.changectx(self, i)
488 487 for i in xrange(*changeid.indices(len(self)))
489 488 if i not in self.changelog.filteredrevs]
490 489 return context.changectx(self, changeid)
491 490
492 491 def __contains__(self, changeid):
493 492 try:
494 493 self[changeid]
495 494 return True
496 495 except error.RepoLookupError:
497 496 return False
498 497
499 498 def __nonzero__(self):
500 499 return True
501 500
502 501 def __len__(self):
503 502 return len(self.changelog)
504 503
505 504 def __iter__(self):
506 505 return iter(self.changelog)
507 506
508 507 def revs(self, expr, *args):
509 508 '''Return a list of revisions matching the given revset'''
510 509 expr = revset.formatspec(expr, *args)
511 510 m = revset.match(None, expr)
512 511 return m(self)
513 512
514 513 def set(self, expr, *args):
515 514 '''
516 515 Yield a context for each matching revision, after doing arg
517 516 replacement via revset.formatspec
518 517 '''
519 518 for r in self.revs(expr, *args):
520 519 yield self[r]
521 520
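# Lookup sketch (illustration of the APIs above; 'repo' is any
# localrepository instance):
#
#   repo[None]                    # workingctx for the working directory
#   repo['tip']                   # changectx by rev/node/tag/bookmark
#   repo[0:3]                     # changectxs, skipping filtered revs
#   repo.revs('head() and not closed()')      # revision numbers for a revset
#   for ctx in repo.set('%ld::', revs): ...   # contexts, with arg formatting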
522 521 def url(self):
523 522 return 'file:' + self.root
524 523
525 524 def hook(self, name, throw=False, **args):
526 525 """Call a hook, passing this repo instance.
527 526
528 527 This is a convenience method to aid invoking hooks. Extensions likely
529 528 won't call this unless they have registered a custom hook or are
530 529 replacing code that is expected to call a hook.
531 530 """
532 531 return hook.hook(self.ui, self, name, throw, **args)
533 532
534 533 @unfilteredmethod
535 534 def _tag(self, names, node, message, local, user, date, extra={},
536 535 editor=False):
537 536 if isinstance(names, str):
538 537 names = (names,)
539 538
540 539 branches = self.branchmap()
541 540 for name in names:
542 541 self.hook('pretag', throw=True, node=hex(node), tag=name,
543 542 local=local)
544 543 if name in branches:
545 544 self.ui.warn(_("warning: tag %s conflicts with existing"
546 545 " branch name\n") % name)
547 546
548 547 def writetags(fp, names, munge, prevtags):
549 548 fp.seek(0, 2)
550 549 if prevtags and prevtags[-1] != '\n':
551 550 fp.write('\n')
552 551 for name in names:
553 552 if munge:
554 553 m = munge(name)
555 554 else:
556 555 m = name
557 556
558 557 if (self._tagscache.tagtypes and
559 558 name in self._tagscache.tagtypes):
560 559 old = self.tags().get(name, nullid)
561 560 fp.write('%s %s\n' % (hex(old), m))
562 561 fp.write('%s %s\n' % (hex(node), m))
563 562 fp.close()
564 563
565 564 prevtags = ''
566 565 if local:
567 566 try:
568 567 fp = self.vfs('localtags', 'r+')
569 568 except IOError:
570 569 fp = self.vfs('localtags', 'a')
571 570 else:
572 571 prevtags = fp.read()
573 572
574 573 # local tags are stored in the current charset
575 574 writetags(fp, names, None, prevtags)
576 575 for name in names:
577 576 self.hook('tag', node=hex(node), tag=name, local=local)
578 577 return
579 578
580 579 try:
581 580 fp = self.wfile('.hgtags', 'rb+')
582 581 except IOError, e:
583 582 if e.errno != errno.ENOENT:
584 583 raise
585 584 fp = self.wfile('.hgtags', 'ab')
586 585 else:
587 586 prevtags = fp.read()
588 587
589 588 # committed tags are stored in UTF-8
590 589 writetags(fp, names, encoding.fromlocal, prevtags)
591 590
592 591 fp.close()
593 592
594 593 self.invalidatecaches()
595 594
596 595 if '.hgtags' not in self.dirstate:
597 596 self[None].add(['.hgtags'])
598 597
599 598 m = matchmod.exact(self.root, '', ['.hgtags'])
600 599 tagnode = self.commit(message, user, date, extra=extra, match=m,
601 600 editor=editor)
602 601
603 602 for name in names:
604 603 self.hook('tag', node=hex(node), tag=name, local=local)
605 604
606 605 return tagnode
607 606
608 607 def tag(self, names, node, message, local, user, date, editor=False):
609 608 '''tag a revision with one or more symbolic names.
610 609
611 610 names is a list of strings or, when adding a single tag, names may be a
612 611 string.
613 612
614 613 if local is True, the tags are stored in a per-repository file.
615 614 otherwise, they are stored in the .hgtags file, and a new
616 615 changeset is committed with the change.
617 616
618 617 keyword arguments:
619 618
620 619 local: whether to store tags in non-version-controlled file
621 620 (default False)
622 621
623 622 message: commit message to use if committing
624 623
625 624 user: name of user to use if committing
626 625
627 626 date: date tuple to use if committing'''
628 627
629 628 if not local:
630 629 m = matchmod.exact(self.root, '', ['.hgtags'])
631 630 if util.any(self.status(match=m, unknown=True, ignored=True)):
632 631 raise util.Abort(_('working copy of .hgtags is changed'),
633 632 hint=_('please commit .hgtags manually'))
634 633
635 634 self.tags() # instantiate the cache
636 635 self._tag(names, node, message, local, user, date, editor=editor)
637 636
638 637 @filteredpropertycache
639 638 def _tagscache(self):
640 639 '''Returns a tagscache object that contains various tag-related
641 640 caches.'''
642 641
643 642 # This simplifies its cache management by having one decorated
644 643 # function (this one) and the rest simply fetch things from it.
645 644 class tagscache(object):
646 645 def __init__(self):
647 646 # These two define the set of tags for this repository. tags
648 647 # maps tag name to node; tagtypes maps tag name to 'global' or
649 648 # 'local'. (Global tags are defined by .hgtags across all
650 649 # heads, and local tags are defined in .hg/localtags.)
651 650 # They constitute the in-memory cache of tags.
652 651 self.tags = self.tagtypes = None
653 652
654 653 self.nodetagscache = self.tagslist = None
655 654
656 655 cache = tagscache()
657 656 cache.tags, cache.tagtypes = self._findtags()
658 657
659 658 return cache
660 659
661 660 def tags(self):
662 661 '''return a mapping of tag to node'''
663 662 t = {}
664 663 if self.changelog.filteredrevs:
665 664 tags, tt = self._findtags()
666 665 else:
667 666 tags = self._tagscache.tags
668 667 for k, v in tags.iteritems():
669 668 try:
670 669 # ignore tags to unknown nodes
671 670 self.changelog.rev(v)
672 671 t[k] = v
673 672 except (error.LookupError, ValueError):
674 673 pass
675 674 return t
676 675
677 676 def _findtags(self):
678 677 '''Do the hard work of finding tags. Return a pair of dicts
679 678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
680 679 maps tag name to a string like \'global\' or \'local\'.
681 680 Subclasses or extensions are free to add their own tags, but
682 681 should be aware that the returned dicts will be retained for the
683 682 duration of the localrepo object.'''
684 683
685 684 # XXX what tagtype should subclasses/extensions use? Currently
686 685 # mq and bookmarks add tags, but do not set the tagtype at all.
687 686 # Should each extension invent its own tag type? Should there
688 687 # be one tagtype for all such "virtual" tags? Or is the status
689 688 # quo fine?
690 689
691 690 alltags = {} # map tag name to (node, hist)
692 691 tagtypes = {}
693 692
694 693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
695 694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
696 695
697 696 # Build the return dicts. Have to re-encode tag names because
698 697 # the tags module always uses UTF-8 (in order not to lose info
699 698 # writing to the cache), but the rest of Mercurial wants them in
700 699 # local encoding.
701 700 tags = {}
702 701 for (name, (node, hist)) in alltags.iteritems():
703 702 if node != nullid:
704 703 tags[encoding.tolocal(name)] = node
705 704 tags['tip'] = self.changelog.tip()
706 705 tagtypes = dict([(encoding.tolocal(name), value)
707 706 for (name, value) in tagtypes.iteritems()])
708 707 return (tags, tagtypes)
709 708
710 709 def tagtype(self, tagname):
711 710 '''
712 711 return the type of the given tag. result can be:
713 712
714 713 'local' : a local tag
715 714 'global' : a global tag
716 715 None : tag does not exist
717 716 '''
718 717
719 718 return self._tagscache.tagtypes.get(tagname)
720 719
721 720 def tagslist(self):
722 721 '''return a list of tags ordered by revision'''
723 722 if not self._tagscache.tagslist:
724 723 l = []
725 724 for t, n in self.tags().iteritems():
726 725 l.append((self.changelog.rev(n), t, n))
727 726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
728 727
729 728 return self._tagscache.tagslist
730 729
731 730 def nodetags(self, node):
732 731 '''return the tags associated with a node'''
733 732 if not self._tagscache.nodetagscache:
734 733 nodetagscache = {}
735 734 for t, n in self._tagscache.tags.iteritems():
736 735 nodetagscache.setdefault(n, []).append(t)
737 736 for tags in nodetagscache.itervalues():
738 737 tags.sort()
739 738 self._tagscache.nodetagscache = nodetagscache
740 739 return self._tagscache.nodetagscache.get(node, [])
741 740
742 741 def nodebookmarks(self, node):
743 742 marks = []
744 743 for bookmark, n in self._bookmarks.iteritems():
745 744 if n == node:
746 745 marks.append(bookmark)
747 746 return sorted(marks)
748 747
749 748 def branchmap(self):
750 749 '''returns a dictionary {branch: [branchheads]} with branchheads
751 750 ordered by increasing revision number'''
752 751 branchmap.updatecache(self)
753 752 return self._branchcaches[self.filtername]
754 753
755 754 @unfilteredmethod
756 755 def revbranchcache(self):
757 756 if not self._revbranchcache:
758 757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
759 758 return self._revbranchcache
760 759
761 760 def branchtip(self, branch, ignoremissing=False):
762 761 '''return the tip node for a given branch
763 762
764 763 If ignoremissing is True, then this method will not raise an error.
765 764 This is helpful for callers that only expect None for a missing branch
766 765 (e.g. namespace).
767 766
768 767 '''
769 768 try:
770 769 return self.branchmap().branchtip(branch)
771 770 except KeyError:
772 771 if not ignoremissing:
773 772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
774 773 else:
775 774 pass
776 775
777 776 def lookup(self, key):
778 777 return self[key].node()
779 778
780 779 def lookupbranch(self, key, remote=None):
781 780 repo = remote or self
782 781 if key in repo.branchmap():
783 782 return key
784 783
785 784 repo = (remote and remote.local()) and remote or self
786 785 return repo[key].branch()
787 786
788 787 def known(self, nodes):
789 788 nm = self.changelog.nodemap
790 789 pc = self._phasecache
791 790 result = []
792 791 for n in nodes:
793 792 r = nm.get(n)
794 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 794 result.append(resp)
796 795 return result
797 796
798 797 def local(self):
799 798 return self
800 799
801 800 def cancopy(self):
802 801 # so statichttprepo's override of local() works
803 802 if not self.local():
804 803 return False
805 804 if not self.ui.configbool('phases', 'publish', True):
806 805 return True
807 806 # if publishing we can't copy if there is filtered content
808 807 return not self.filtered('visible').changelog.filteredrevs
809 808
810 809 def shared(self):
811 810 '''the type of shared repository (None if not shared)'''
812 811 if self.sharedpath != self.path:
813 812 return 'store'
814 813 return None
815 814
816 815 def join(self, f, *insidef):
817 816 return self.vfs.join(os.path.join(f, *insidef))
818 817
819 818 def wjoin(self, f, *insidef):
820 819 return self.vfs.reljoin(self.root, f, *insidef)
821 820
822 821 def file(self, f):
823 822 if f[0] == '/':
824 823 f = f[1:]
825 824 return filelog.filelog(self.svfs, f)
826 825
827 826 def changectx(self, changeid):
828 827 return self[changeid]
829 828
830 829 def parents(self, changeid=None):
831 830 '''get list of changectxs for parents of changeid'''
832 831 return self[changeid].parents()
833 832
834 833 def setparents(self, p1, p2=nullid):
835 834 self.dirstate.beginparentchange()
836 835 copies = self.dirstate.setparents(p1, p2)
837 836 pctx = self[p1]
838 837 if copies:
839 838 # Adjust copy records; the dirstate cannot do it, as it
840 839 # requires access to the parents' manifests. Preserve them
841 840 # only for entries added to the first parent.
842 841 for f in copies:
843 842 if f not in pctx and copies[f] in pctx:
844 843 self.dirstate.copy(copies[f], f)
845 844 if p2 == nullid:
846 845 for f, s in sorted(self.dirstate.copies().items()):
847 846 if f not in pctx and s not in pctx:
848 847 self.dirstate.copy(None, f)
849 848 self.dirstate.endparentchange()
850 849
851 850 def filectx(self, path, changeid=None, fileid=None):
852 851 """changeid can be a changeset revision, node, or tag.
853 852 fileid can be a file revision or node."""
854 853 return context.filectx(self, path, changeid, fileid)
855 854
856 855 def getcwd(self):
857 856 return self.dirstate.getcwd()
858 857
859 858 def pathto(self, f, cwd=None):
860 859 return self.dirstate.pathto(f, cwd)
861 860
862 861 def wfile(self, f, mode='r'):
863 862 return self.wvfs(f, mode)
864 863
865 864 def _link(self, f):
866 865 return self.wvfs.islink(f)
867 866
868 867 def _loadfilter(self, filter):
869 868 if filter not in self.filterpats:
870 869 l = []
871 870 for pat, cmd in self.ui.configitems(filter):
872 871 if cmd == '!':
873 872 continue
874 873 mf = matchmod.match(self.root, '', [pat])
875 874 fn = None
876 875 params = cmd
877 876 for name, filterfn in self._datafilters.iteritems():
878 877 if cmd.startswith(name):
879 878 fn = filterfn
880 879 params = cmd[len(name):].lstrip()
881 880 break
882 881 if not fn:
883 882 fn = lambda s, c, **kwargs: util.filter(s, c)
884 883 # Wrap old filters not supporting keyword arguments
885 884 if not inspect.getargspec(fn)[2]:
886 885 oldfn = fn
887 886 fn = lambda s, c, **kwargs: oldfn(s, c)
888 887 l.append((mf, fn, params))
889 888 self.filterpats[filter] = l
890 889 return self.filterpats[filter]
891 890
892 891 def _filter(self, filterpats, filename, data):
893 892 for mf, fn, cmd in filterpats:
894 893 if mf(filename):
895 894 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
896 895 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
897 896 break
898 897
899 898 return data
900 899
901 900 @unfilteredpropertycache
902 901 def _encodefilterpats(self):
903 902 return self._loadfilter('encode')
904 903
905 904 @unfilteredpropertycache
906 905 def _decodefilterpats(self):
907 906 return self._loadfilter('decode')
908 907
909 908 def adddatafilter(self, name, filter):
910 909 self._datafilters[name] = filter
911 910
912 911 def wread(self, filename):
913 912 if self._link(filename):
914 913 data = self.wvfs.readlink(filename)
915 914 else:
916 915 data = self.wvfs.read(filename)
917 916 return self._filter(self._encodefilterpats, filename, data)
918 917
919 918 def wwrite(self, filename, data, flags):
920 919 """write ``data`` into ``filename`` in the working directory
921 920
922 921 This returns the length of the written (maybe decoded) data.
923 922 """
924 923 data = self._filter(self._decodefilterpats, filename, data)
925 924 if 'l' in flags:
926 925 self.wvfs.symlink(data, filename)
927 926 else:
928 927 self.wvfs.write(filename, data)
929 928 if 'x' in flags:
930 929 self.wvfs.setflags(filename, False, True)
931 930 return len(data)
932 931
933 932 def wwritedata(self, filename, data):
934 933 return self._filter(self._decodefilterpats, filename, data)
935 934
936 935 def currenttransaction(self):
937 936 """return the current transaction or None if non exists"""
938 937 if self._transref:
939 938 tr = self._transref()
940 939 else:
941 940 tr = None
942 941
943 942 if tr and tr.running():
944 943 return tr
945 944 return None
946 945
947 946 def transaction(self, desc, report=None):
948 947 if (self.ui.configbool('devel', 'all')
949 948 or self.ui.configbool('devel', 'check-locks')):
950 949 l = self._lockref and self._lockref()
951 950 if l is None or not l.held:
952 951 scmutil.develwarn(self.ui, 'transaction with no lock')
953 952 tr = self.currenttransaction()
954 953 if tr is not None:
955 954 return tr.nest()
956 955
957 956 # abort here if the journal already exists
958 957 if self.svfs.exists("journal"):
959 958 raise error.RepoError(
960 959 _("abandoned transaction found"),
961 960 hint=_("run 'hg recover' to clean up transaction"))
962 961
963 962 self.hook('pretxnopen', throw=True, txnname=desc)
964 963
965 964 self._writejournal(desc)
966 965 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
967 966 if report:
968 967 rp = report
969 968 else:
970 969 rp = self.ui.warn
971 970 vfsmap = {'plain': self.vfs} # root of .hg/
972 971 # we must avoid cyclic reference between repo and transaction.
973 972 reporef = weakref.ref(self)
974 973 def validate(tr):
975 974 """will run pre-closing hooks"""
976 975 pending = lambda: tr.writepending() and self.root or ""
977 976 reporef().hook('pretxnclose', throw=True, pending=pending,
978 977 txnname=desc, **tr.hookargs)
979 978
980 979 tr = transaction.transaction(rp, self.sopener, vfsmap,
981 980 "journal",
982 981 "undo",
983 982 aftertrans(renames),
984 983 self.store.createmode,
985 984 validator=validate)
986 985
987 986 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
988 987 tr.hookargs['TXNID'] = trid
989 988 # note: writing the fncache only during finalize means that the file is
990 989 # outdated when running hooks. As fncache is used for streaming clone,
991 990 # this is not expected to break anything that happens during the hooks.
992 991 tr.addfinalize('flush-fncache', self.store.write)
993 992 def txnclosehook(tr2):
994 993 """To be run if transaction is successful, will schedule a hook run
995 994 """
996 995 def hook():
997 996 reporef().hook('txnclose', throw=False, txnname=desc,
998 997 **tr2.hookargs)
999 998 reporef()._afterlock(hook)
1000 999 tr.addfinalize('txnclose-hook', txnclosehook)
1001 1000 def txnaborthook(tr2):
1002 1001 """To be run if transaction is aborted
1003 1002 """
1004 1003 reporef().hook('txnabort', throw=False, txnname=desc,
1005 1004 **tr2.hookargs)
1006 1005 tr.addabort('txnabort-hook', txnaborthook)
1007 1006 self._transref = weakref.ref(tr)
1008 1007 return tr
1009 1008
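# Caller-side sketch (the pattern commitctx() below follows; illustration
# only):
#
#   tr = repo.transaction('my-operation')
#   try:
#       ...                # write changelog/manifest/filelogs via tr
#       tr.close()         # runs finalizers, schedules the txnclose hook
#   finally:
#       tr.release()       # aborts (txnabort hook) if close() never ran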
1010 1009 def _journalfiles(self):
1011 1010 return ((self.svfs, 'journal'),
1012 1011 (self.vfs, 'journal.dirstate'),
1013 1012 (self.vfs, 'journal.branch'),
1014 1013 (self.vfs, 'journal.desc'),
1015 1014 (self.vfs, 'journal.bookmarks'),
1016 1015 (self.svfs, 'journal.phaseroots'))
1017 1016
1018 1017 def undofiles(self):
1019 1018 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1020 1019
1021 1020 def _writejournal(self, desc):
1022 1021 self.vfs.write("journal.dirstate",
1023 1022 self.vfs.tryread("dirstate"))
1024 1023 self.vfs.write("journal.branch",
1025 1024 encoding.fromlocal(self.dirstate.branch()))
1026 1025 self.vfs.write("journal.desc",
1027 1026 "%d\n%s\n" % (len(self), desc))
1028 1027 self.vfs.write("journal.bookmarks",
1029 1028 self.vfs.tryread("bookmarks"))
1030 1029 self.svfs.write("journal.phaseroots",
1031 1030 self.svfs.tryread("phaseroots"))
1032 1031
1033 1032 def recover(self):
1034 1033 lock = self.lock()
1035 1034 try:
1036 1035 if self.svfs.exists("journal"):
1037 1036 self.ui.status(_("rolling back interrupted transaction\n"))
1038 1037 vfsmap = {'': self.svfs,
1039 1038 'plain': self.vfs,}
1040 1039 transaction.rollback(self.svfs, vfsmap, "journal",
1041 1040 self.ui.warn)
1042 1041 self.invalidate()
1043 1042 return True
1044 1043 else:
1045 1044 self.ui.warn(_("no interrupted transaction available\n"))
1046 1045 return False
1047 1046 finally:
1048 1047 lock.release()
1049 1048
1050 1049 def rollback(self, dryrun=False, force=False):
1051 1050 wlock = lock = None
1052 1051 try:
1053 1052 wlock = self.wlock()
1054 1053 lock = self.lock()
1055 1054 if self.svfs.exists("undo"):
1056 1055 return self._rollback(dryrun, force)
1057 1056 else:
1058 1057 self.ui.warn(_("no rollback information available\n"))
1059 1058 return 1
1060 1059 finally:
1061 1060 release(lock, wlock)
1062 1061
1063 1062 @unfilteredmethod # Until we get smarter cache management
1064 1063 def _rollback(self, dryrun, force):
1065 1064 ui = self.ui
1066 1065 try:
1067 1066 args = self.vfs.read('undo.desc').splitlines()
1068 1067 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1069 1068 if len(args) >= 3:
1070 1069 detail = args[2]
1071 1070 oldtip = oldlen - 1
1072 1071
1073 1072 if detail and ui.verbose:
1074 1073 msg = (_('repository tip rolled back to revision %s'
1075 1074 ' (undo %s: %s)\n')
1076 1075 % (oldtip, desc, detail))
1077 1076 else:
1078 1077 msg = (_('repository tip rolled back to revision %s'
1079 1078 ' (undo %s)\n')
1080 1079 % (oldtip, desc))
1081 1080 except IOError:
1082 1081 msg = _('rolling back unknown transaction\n')
1083 1082 desc = None
1084 1083
1085 1084 if not force and self['.'] != self['tip'] and desc == 'commit':
1086 1085 raise util.Abort(
1087 1086 _('rollback of last commit while not checked out '
1088 1087 'may lose data'), hint=_('use -f to force'))
1089 1088
1090 1089 ui.status(msg)
1091 1090 if dryrun:
1092 1091 return 0
1093 1092
1094 1093 parents = self.dirstate.parents()
1095 1094 self.destroying()
1096 1095 vfsmap = {'plain': self.vfs, '': self.svfs}
1097 1096 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1098 1097 if self.vfs.exists('undo.bookmarks'):
1099 1098 self.vfs.rename('undo.bookmarks', 'bookmarks')
1100 1099 if self.svfs.exists('undo.phaseroots'):
1101 1100 self.svfs.rename('undo.phaseroots', 'phaseroots')
1102 1101 self.invalidate()
1103 1102
1104 1103 parentgone = (parents[0] not in self.changelog.nodemap or
1105 1104 parents[1] not in self.changelog.nodemap)
1106 1105 if parentgone:
1107 1106 self.vfs.rename('undo.dirstate', 'dirstate')
1108 1107 try:
1109 1108 branch = self.vfs.read('undo.branch')
1110 1109 self.dirstate.setbranch(encoding.tolocal(branch))
1111 1110 except IOError:
1112 1111 ui.warn(_('named branch could not be reset: '
1113 1112 'current branch is still \'%s\'\n')
1114 1113 % self.dirstate.branch())
1115 1114
1116 1115 self.dirstate.invalidate()
1117 1116 parents = tuple([p.rev() for p in self.parents()])
1118 1117 if len(parents) > 1:
1119 1118 ui.status(_('working directory now based on '
1120 1119 'revisions %d and %d\n') % parents)
1121 1120 else:
1122 1121 ui.status(_('working directory now based on '
1123 1122 'revision %d\n') % parents)
1124 1123 ms = mergemod.mergestate(self)
1125 1124 ms.reset(self['.'].node())
1126 1125
1127 1126 # TODO: if we know which new heads may result from this rollback, pass
1128 1127 # them to destroy(), which will prevent the branchhead cache from being
1129 1128 # invalidated.
1130 1129 self.destroyed()
1131 1130 return 0
1132 1131
1133 1132 def invalidatecaches(self):
1134 1133
1135 1134 if '_tagscache' in vars(self):
1136 1135 # can't use delattr on proxy
1137 1136 del self.__dict__['_tagscache']
1138 1137
1139 1138 self.unfiltered()._branchcaches.clear()
1140 1139 self.invalidatevolatilesets()
1141 1140
1142 1141 def invalidatevolatilesets(self):
1143 1142 self.filteredrevcache.clear()
1144 1143 obsolete.clearobscaches(self)
1145 1144
1146 1145 def invalidatedirstate(self):
1147 1146 '''Invalidates the dirstate, causing the next call to dirstate
1148 1147 to check if it was modified since the last time it was read,
1149 1148 rereading it if it has.
1150 1149
1151 1150 This is different from dirstate.invalidate() in that it doesn't
1152 1151 always reread the dirstate. Use dirstate.invalidate() if you want to
1153 1152 explicitly read the dirstate again (i.e. restoring it to a previous
1154 1153 known good state).'''
1155 1154 if hasunfilteredcache(self, 'dirstate'):
1156 1155 for k in self.dirstate._filecache:
1157 1156 try:
1158 1157 delattr(self.dirstate, k)
1159 1158 except AttributeError:
1160 1159 pass
1161 1160 delattr(self.unfiltered(), 'dirstate')
1162 1161
1163 1162 def invalidate(self):
1164 1163 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1165 1164 for k in self._filecache:
1166 1165 # dirstate is invalidated separately in invalidatedirstate()
1167 1166 if k == 'dirstate':
1168 1167 continue
1169 1168
1170 1169 try:
1171 1170 delattr(unfiltered, k)
1172 1171 except AttributeError:
1173 1172 pass
1174 1173 self.invalidatecaches()
1175 1174 self.store.invalidatecaches()
1176 1175
1177 1176 def invalidateall(self):
1178 1177 '''Fully invalidates both store and non-store parts, causing the
1179 1178 subsequent operation to reread any outside changes.'''
1180 1179 # extension should hook this to invalidate its caches
1181 1180 self.invalidate()
1182 1181 self.invalidatedirstate()
1183 1182
1184 1183 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1185 1184 try:
1186 1185 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1187 1186 except error.LockHeld, inst:
1188 1187 if not wait:
1189 1188 raise
1190 1189 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1191 1190 (desc, inst.locker))
1192 1191 # default to 600 seconds timeout
1193 1192 l = lockmod.lock(vfs, lockname,
1194 1193 int(self.ui.config("ui", "timeout", "600")),
1195 1194 releasefn, desc=desc)
1196 1195 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1197 1196 if acquirefn:
1198 1197 acquirefn()
1199 1198 return l
1200 1199
1201 1200 def _afterlock(self, callback):
1202 1201 """add a callback to be run when the repository is fully unlocked
1203 1202
1204 1203 The callback will be executed when the outermost lock is released
1205 1204 (with wlock being higher level than 'lock')."""
1206 1205 for ref in (self._wlockref, self._lockref):
1207 1206 l = ref and ref()
1208 1207 if l and l.held:
1209 1208 l.postrelease.append(callback)
1210 1209 break
1211 1210 else: # no lock has been found.
1212 1211 callback()
1213 1212
1214 1213 def lock(self, wait=True):
1215 1214 '''Lock the repository store (.hg/store) and return a weak reference
1216 1215 to the lock. Use this before modifying the store (e.g. committing or
1217 1216 stripping). If you are opening a transaction, get a lock as well.
1218 1217
1219 1218 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1220 1219 'wlock' first to avoid a deadlock hazard.'''
1221 1220 l = self._lockref and self._lockref()
1222 1221 if l is not None and l.held:
1223 1222 l.lock()
1224 1223 return l
1225 1224
1226 1225 def unlock():
1227 1226 for k, ce in self._filecache.items():
1228 1227 if k == 'dirstate' or k not in self.__dict__:
1229 1228 continue
1230 1229 ce.refresh()
1231 1230
1232 1231 l = self._lock(self.svfs, "lock", wait, unlock,
1233 1232 self.invalidate, _('repository %s') % self.origroot)
1234 1233 self._lockref = weakref.ref(l)
1235 1234 return l
1236 1235
1237 1236 def wlock(self, wait=True):
1238 1237 '''Lock the non-store parts of the repository (everything under
1239 1238 .hg except .hg/store) and return a weak reference to the lock.
1240 1239
1241 1240 Use this before modifying files in .hg.
1242 1241
1243 1242 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1244 1243 'wlock' first to avoid a deadlock hazard.'''
1245 1244 l = self._wlockref and self._wlockref()
1246 1245 if l is not None and l.held:
1247 1246 l.lock()
1248 1247 return l
1249 1248
1250 1249 # We do not need to check for non-waiting lock acquisition. Such
1251 1250 # acquisition would not cause a deadlock, as it would just fail.
1252 1251 if wait and (self.ui.configbool('devel', 'all')
1253 1252 or self.ui.configbool('devel', 'check-locks')):
1254 1253 l = self._lockref and self._lockref()
1255 1254 if l is not None and l.held:
1256 1255 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1257 1256
1258 1257 def unlock():
1259 1258 if self.dirstate.pendingparentchange():
1260 1259 self.dirstate.invalidate()
1261 1260 else:
1262 1261 self.dirstate.write()
1263 1262
1264 1263 self._filecache['dirstate'].refresh()
1265 1264
1266 1265 l = self._lock(self.vfs, "wlock", wait, unlock,
1267 1266 self.invalidatedirstate, _('working directory of %s') %
1268 1267 self.origroot)
1269 1268 self._wlockref = weakref.ref(l)
1270 1269 return l
1271 1270
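# Lock-ordering sketch (the rule from the docstrings above; rollback()
# earlier in this file follows it):
#
#   wlock = repo.wlock()       # always take wlock first...
#   lock = repo.lock()         # ...then the store lock
#   try:
#       ...
#   finally:
#       release(lock, wlock)   # release in reverse order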
1272 1271 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1273 1272 """
1274 1273 commit an individual file as part of a larger transaction
1275 1274 """
1276 1275
1277 1276 fname = fctx.path()
1278 1277 fparent1 = manifest1.get(fname, nullid)
1279 1278 fparent2 = manifest2.get(fname, nullid)
1280 1279 if isinstance(fctx, context.filectx):
1281 1280 node = fctx.filenode()
1282 1281 if node in [fparent1, fparent2]:
1283 1282 self.ui.debug('reusing %s filelog entry\n' % fname)
1284 1283 return node
1285 1284
1286 1285 flog = self.file(fname)
1287 1286 meta = {}
1288 1287 copy = fctx.renamed()
1289 1288 if copy and copy[0] != fname:
1290 1289 # Mark the new revision of this file as a copy of another
1291 1290 # file. This copy data will effectively act as a parent
1292 1291 # of this new revision. If this is a merge, the first
1293 1292 # parent will be the nullid (meaning "look up the copy data")
1294 1293 # and the second one will be the other parent. For example:
1295 1294 #
1296 1295 # 0 --- 1 --- 3 rev1 changes file foo
1297 1296 # \ / rev2 renames foo to bar and changes it
1298 1297 # \- 2 -/ rev3 should have bar with all changes and
1299 1298 # should record that bar descends from
1300 1299 # bar in rev2 and foo in rev1
1301 1300 #
1302 1301 # this allows this merge to succeed:
1303 1302 #
1304 1303 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1305 1304 # \ / merging rev3 and rev4 should use bar@rev2
1306 1305 # \- 2 --- 4 as the merge base
1307 1306 #
1308 1307
1309 1308 cfname = copy[0]
1310 1309 crev = manifest1.get(cfname)
1311 1310 newfparent = fparent2
1312 1311
1313 1312 if manifest2: # branch merge
1314 1313 if fparent2 == nullid or crev is None: # copied on remote side
1315 1314 if cfname in manifest2:
1316 1315 crev = manifest2[cfname]
1317 1316 newfparent = fparent1
1318 1317
1319 1318 # Here, we used to search backwards through history to try to find
1320 1319 # where the file copy came from if the source of a copy was not in
1321 1320 # the parent directory. However, this doesn't actually make sense to
1322 1321 # do (what does a copy from something not in your working copy even
1323 1322 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1324 1323 # the user that copy information was dropped, so if they didn't
1325 1324 # expect this outcome it can be fixed, but this is the correct
1326 1325 # behavior in this circumstance.
1327 1326
1328 1327 if crev:
1329 1328 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1330 1329 meta["copy"] = cfname
1331 1330 meta["copyrev"] = hex(crev)
1332 1331 fparent1, fparent2 = nullid, newfparent
1333 1332 else:
1334 1333 self.ui.warn(_("warning: can't find ancestor for '%s' "
1335 1334 "copied from '%s'!\n") % (fname, cfname))
1336 1335
1337 1336 elif fparent1 == nullid:
1338 1337 fparent1, fparent2 = fparent2, nullid
1339 1338 elif fparent2 != nullid:
1340 1339 # is one parent an ancestor of the other?
1341 1340 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1342 1341 if fparent1 in fparentancestors:
1343 1342 fparent1, fparent2 = fparent2, nullid
1344 1343 elif fparent2 in fparentancestors:
1345 1344 fparent2 = nullid
1346 1345
1347 1346 # is the file changed?
1348 1347 text = fctx.data()
1349 1348 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1350 1349 changelist.append(fname)
1351 1350 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1352 1351 # are just the flags changed during merge?
1353 1352 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1354 1353 changelist.append(fname)
1355 1354
1356 1355 return fparent1
1357 1356
1358 1357 @unfilteredmethod
1359 1358 def commit(self, text="", user=None, date=None, match=None, force=False,
1360 1359 editor=False, extra={}):
1361 1360 """Add a new revision to current repository.
1362 1361
1363 1362 Revision information is gathered from the working directory,
1364 1363 match can be used to filter the committed files. If editor is
1365 1364 supplied, it is called to get a commit message.
1366 1365 """
1367 1366
1368 1367 def fail(f, msg):
1369 1368 raise util.Abort('%s: %s' % (f, msg))
1370 1369
1371 1370 if not match:
1372 1371 match = matchmod.always(self.root, '')
1373 1372
1374 1373 if not force:
1375 1374 vdirs = []
1376 1375 match.explicitdir = vdirs.append
1377 1376 match.bad = fail
1378 1377
1379 1378 wlock = self.wlock()
1380 1379 try:
1381 1380 wctx = self[None]
1382 1381 merge = len(wctx.parents()) > 1
1383 1382
1384 1383 if not force and merge and not match.always():
1385 1384 raise util.Abort(_('cannot partially commit a merge '
1386 1385 '(do not specify files or patterns)'))
1387 1386
1388 1387 status = self.status(match=match, clean=force)
1389 1388 if force:
1390 1389 status.modified.extend(status.clean) # mq may commit clean files
1391 1390
1392 1391 # check subrepos
1393 1392 subs = []
1394 1393 commitsubs = set()
1395 1394 newstate = wctx.substate.copy()
1396 1395 # only manage subrepos and .hgsubstate if .hgsub is present
1397 1396 if '.hgsub' in wctx:
1398 1397 # we'll decide whether to track this ourselves, thanks
1399 1398 for c in status.modified, status.added, status.removed:
1400 1399 if '.hgsubstate' in c:
1401 1400 c.remove('.hgsubstate')
1402 1401
1403 1402 # compare current state to last committed state
1404 1403 # build new substate based on last committed state
1405 1404 oldstate = wctx.p1().substate
1406 1405 for s in sorted(newstate.keys()):
1407 1406 if not match(s):
1408 1407 # ignore working copy, use old state if present
1409 1408 if s in oldstate:
1410 1409 newstate[s] = oldstate[s]
1411 1410 continue
1412 1411 if not force:
1413 1412 raise util.Abort(
1414 1413 _("commit with new subrepo %s excluded") % s)
1415 1414 dirtyreason = wctx.sub(s).dirtyreason(True)
1416 1415 if dirtyreason:
1417 1416 if not self.ui.configbool('ui', 'commitsubrepos'):
1418 1417 raise util.Abort(dirtyreason,
1419 1418 hint=_("use --subrepos for recursive commit"))
1420 1419 subs.append(s)
1421 1420 commitsubs.add(s)
1422 1421 else:
1423 1422 bs = wctx.sub(s).basestate()
1424 1423 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1425 1424 if oldstate.get(s, (None, None, None))[1] != bs:
1426 1425 subs.append(s)
1427 1426
1428 1427 # check for removed subrepos
1429 1428 for p in wctx.parents():
1430 1429 r = [s for s in p.substate if s not in newstate]
1431 1430 subs += [s for s in r if match(s)]
1432 1431 if subs:
1433 1432 if (not match('.hgsub') and
1434 1433 '.hgsub' in (wctx.modified() + wctx.added())):
1435 1434 raise util.Abort(
1436 1435 _("can't commit subrepos without .hgsub"))
1437 1436 status.modified.insert(0, '.hgsubstate')
1438 1437
1439 1438 elif '.hgsub' in status.removed:
1440 1439 # clean up .hgsubstate when .hgsub is removed
1441 1440 if ('.hgsubstate' in wctx and
1442 1441 '.hgsubstate' not in (status.modified + status.added +
1443 1442 status.removed)):
1444 1443 status.removed.insert(0, '.hgsubstate')
1445 1444
1446 1445 # make sure all explicit patterns are matched
1447 1446 if not force and match.files():
1448 1447 matched = set(status.modified + status.added + status.removed)
1449 1448
1450 1449 for f in match.files():
1451 1450 f = self.dirstate.normalize(f)
1452 1451 if f == '.' or f in matched or f in wctx.substate:
1453 1452 continue
1454 1453 if f in status.deleted:
1455 1454 fail(f, _('file not found!'))
1456 1455 if f in vdirs: # visited directory
1457 1456 d = f + '/'
1458 1457 for mf in matched:
1459 1458 if mf.startswith(d):
1460 1459 break
1461 1460 else:
1462 1461 fail(f, _("no match under directory!"))
1463 1462 elif f not in self.dirstate:
1464 1463 fail(f, _("file not tracked!"))
1465 1464
1466 1465 cctx = context.workingcommitctx(self, status,
1467 1466 text, user, date, extra)
1468 1467
1469 1468 if (not force and not extra.get("close") and not merge
1470 1469 and not cctx.files()
1471 1470 and wctx.branch() == wctx.p1().branch()):
1472 1471 return None
1473 1472
1474 1473 if merge and cctx.deleted():
1475 1474 raise util.Abort(_("cannot commit merge with missing files"))
1476 1475
1477 1476 ms = mergemod.mergestate(self)
1478 1477 for f in status.modified:
1479 1478 if f in ms and ms[f] == 'u':
1480 1479 raise util.Abort(_('unresolved merge conflicts '
1481 1480 '(see "hg help resolve")'))
1482 1481
1483 1482 if editor:
1484 1483 cctx._text = editor(self, cctx, subs)
1485 1484 edited = (text != cctx._text)
1486 1485
1487 1486 # Save commit message in case this transaction gets rolled back
1488 1487 # (e.g. by a pretxncommit hook). Leave the content alone on
1489 1488 # the assumption that the user will use the same editor again.
1490 1489 msgfn = self.savecommitmessage(cctx._text)
1491 1490
1492 1491 # commit subs and write new state
1493 1492 if subs:
1494 1493 for s in sorted(commitsubs):
1495 1494 sub = wctx.sub(s)
1496 1495 self.ui.status(_('committing subrepository %s\n') %
1497 1496 subrepo.subrelpath(sub))
1498 1497 sr = sub.commit(cctx._text, user, date)
1499 1498 newstate[s] = (newstate[s][0], sr)
1500 1499 subrepo.writestate(self, newstate)
1501 1500
1502 1501 p1, p2 = self.dirstate.parents()
1503 1502 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1504 1503 try:
1505 1504 self.hook("precommit", throw=True, parent1=hookp1,
1506 1505 parent2=hookp2)
1507 1506 ret = self.commitctx(cctx, True)
1508 1507 except: # re-raises
1509 1508 if edited:
1510 1509 self.ui.write(
1511 1510 _('note: commit message saved in %s\n') % msgfn)
1512 1511 raise
1513 1512
1514 1513 # update bookmarks, dirstate and mergestate
1515 1514 bookmarks.update(self, [p1, p2], ret)
1516 1515 cctx.markcommitted(ret)
1517 1516 ms.reset()
1518 1517 finally:
1519 1518 wlock.release()
1520 1519
1521 1520 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1522 1521 # hack for commands that use a temporary commit (eg: histedit):
1523 1522 # the temporary commit may have been stripped before the hook is released
1524 1523 if node in self:
1525 1524 self.hook("commit", node=node, parent1=parent1,
1526 1525 parent2=parent2)
1527 1526 self._afterlock(commithook)
1528 1527 return ret
1529 1528
1530 1529 @unfilteredmethod
1531 1530 def commitctx(self, ctx, error=False):
1532 1531 """Add a new revision to current repository.
1533 1532 Revision information is passed via the context argument.
1534 1533 """
1535 1534
1536 1535 tr = None
1537 1536 p1, p2 = ctx.p1(), ctx.p2()
1538 1537 user = ctx.user()
1539 1538
1540 1539 lock = self.lock()
1541 1540 try:
1542 1541 tr = self.transaction("commit")
1543 1542 trp = weakref.proxy(tr)
1544 1543
1545 1544 if ctx.files():
1546 1545 m1 = p1.manifest()
1547 1546 m2 = p2.manifest()
1548 1547 m = m1.copy()
1549 1548
1550 1549 # check in files
1551 1550 added = []
1552 1551 changed = []
1553 1552 removed = list(ctx.removed())
1554 1553 linkrev = len(self)
1555 1554 self.ui.note(_("committing files:\n"))
1556 1555 for f in sorted(ctx.modified() + ctx.added()):
1557 1556 self.ui.note(f + "\n")
1558 1557 try:
1559 1558 fctx = ctx[f]
1560 1559 if fctx is None:
1561 1560 removed.append(f)
1562 1561 else:
1563 1562 added.append(f)
1564 1563 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1565 1564 trp, changed)
1566 1565 m.setflag(f, fctx.flags())
1567 1566 except OSError, inst:
1568 1567 self.ui.warn(_("trouble committing %s!\n") % f)
1569 1568 raise
1570 1569 except IOError, inst:
1571 1570 errcode = getattr(inst, 'errno', errno.ENOENT)
1572 1571 if error or errcode and errcode != errno.ENOENT:
1573 1572 self.ui.warn(_("trouble committing %s!\n") % f)
1574 1573 raise
1575 1574
1576 1575 # update manifest
1577 1576 self.ui.note(_("committing manifest\n"))
1578 1577 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1579 1578 drop = [f for f in removed if f in m]
1580 1579 for f in drop:
1581 1580 del m[f]
1582 1581 mn = self.manifest.add(m, trp, linkrev,
1583 1582 p1.manifestnode(), p2.manifestnode(),
1584 1583 added, drop)
1585 1584 files = changed + removed
1586 1585 else:
1587 1586 mn = p1.manifestnode()
1588 1587 files = []
1589 1588
1590 1589 # update changelog
1591 1590 self.ui.note(_("committing changelog\n"))
1592 1591 self.changelog.delayupdate(tr)
1593 1592 n = self.changelog.add(mn, files, ctx.description(),
1594 1593 trp, p1.node(), p2.node(),
1595 1594 user, ctx.date(), ctx.extra().copy())
1596 1595 p = lambda: tr.writepending() and self.root or ""
1597 1596 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1598 1597 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1599 1598 parent2=xp2, pending=p)
1600 1599 # set the new commit in its proper phase
1601 1600 targetphase = subrepo.newcommitphase(self.ui, ctx)
1602 1601 if targetphase:
1603 1602 # retracting the boundary does not alter parent changesets.
1604 1603 # if a parent has a higher phase, the resulting phase will
1605 1604 # be compliant anyway
1606 1605 #
1607 1606 # if the minimal phase was 0 we don't need to retract anything
1608 1607 phases.retractboundary(self, tr, targetphase, [n])
1609 1608 tr.close()
1610 1609 branchmap.updatecache(self.filtered('served'))
1611 1610 return n
1612 1611 finally:
1613 1612 if tr:
1614 1613 tr.release()
1615 1614 lock.release()
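# Reading aid for commitctx above (a summary derived from the code, not part
# of the original file): within a single lock and transaction it (1) checks
# in each changed file against the p1/p2 manifests via _filecommit, (2) adds
# the updated manifest, (3) appends the changelog entry, (4) fires the
# blocking 'pretxncommit' hook, and (5) retracts the phase boundary so the
# new node ends up in the phase chosen by subrepo.newcommitphase().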
1616 1615
1617 1616 @unfilteredmethod
1618 1617 def destroying(self):
1619 1618 '''Inform the repository that nodes are about to be destroyed.
1620 1619 Intended for use by strip and rollback, so there's a common
1621 1620 place for anything that has to be done before destroying history.
1622 1621
1623 1622 This is mostly useful for saving state that is in memory and waiting
1624 1623 to be flushed when the current lock is released. Because a call to
1625 1624 destroyed is imminent, the repo will be invalidated, causing those
1626 1625 changes to either stay in memory (waiting for the next unlock) or
1627 1626 vanish completely.
1628 1627 '''
1629 1628 # When using the same lock to commit and strip, the phasecache is left
1630 1629 # dirty after committing. Then when we strip, the repo is invalidated,
1631 1630 # causing those changes to disappear.
1632 1631 if '_phasecache' in vars(self):
1633 1632 self._phasecache.write()
1634 1633
1635 1634 @unfilteredmethod
1636 1635 def destroyed(self):
1637 1636 '''Inform the repository that nodes have been destroyed.
1638 1637 Intended for use by strip and rollback, so there's a common
1639 1638 place for anything that has to be done after destroying history.
1640 1639 '''
1641 1640 # When one tries to:
1642 1641 # 1) destroy nodes thus calling this method (e.g. strip)
1643 1642 # 2) use phasecache somewhere (e.g. commit)
1644 1643 #
1645 1644 # then 2) will fail because the phasecache contains nodes that were
1646 1645 # removed. We can either remove phasecache from the filecache,
1647 1646 # causing it to reload next time it is accessed, or simply filter
1648 1647 # the removed nodes now and write the updated cache.
1649 1648 self._phasecache.filterunknown(self)
1650 1649 self._phasecache.write()
1651 1650
1652 1651 # update the 'served' branch cache to help read-only server processes
1653 1652 # Thanks to branchcache collaboration this is done from the nearest
1654 1653 # filtered subset and it is expected to be fast.
1655 1654 branchmap.updatecache(self.filtered('served'))
1656 1655
1657 1656 # Ensure the persistent tag cache is updated. Doing it now
1658 1657 # means that the tag cache only has to worry about destroyed
1659 1658 # heads immediately after a strip/rollback. That in turn
1660 1659 # guarantees that "cachetip == currenttip" (comparing both rev
1661 1660 # and node) always means no nodes have been added or destroyed.
1662 1661
1663 1662 # XXX this is suboptimal when qrefresh'ing: we strip the current
1664 1663 # head, refresh the tag cache, then immediately add a new head.
1665 1664 # But I think doing it this way is necessary for the "instant
1666 1665 # tag cache retrieval" case to work.
1667 1666 self.invalidate()
1668 1667
1669 1668 def walk(self, match, node=None):
1670 1669 '''
1671 1670 walk recursively through the directory tree or a given
1672 1671 changeset, finding all files matched by the match
1673 1672 function
1674 1673 '''
1675 1674 return self[node].walk(match)
1676 1675
1677 1676 def status(self, node1='.', node2=None, match=None,
1678 1677 ignored=False, clean=False, unknown=False,
1679 1678 listsubrepos=False):
1680 1679 '''a convenience method that calls node1.status(node2)'''
1681 1680 return self[node1].status(node2, match, ignored, clean, unknown,
1682 1681 listsubrepos)
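# Hedged usage sketch for the two convenience wrappers above; the matcher
# construction is an assumption based on the matchmod import, not something
# shown in this file:
#
#   m = matchmod.match(repo.root, '', ['glob:**.py'])
#   for f in repo.walk(m):            # matching files in the working copy
#       process(f)                    # process() is hypothetical
#   st = repo.status(match=m)         # status restricted to the same match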
1683 1682
1684 1683 def heads(self, start=None):
1685 1684 heads = self.changelog.heads(start)
1686 1685 # sort the output in rev descending order
1687 1686 return sorted(heads, key=self.changelog.rev, reverse=True)
1688 1687
1689 1688 def branchheads(self, branch=None, start=None, closed=False):
1690 1689 '''return a (possibly filtered) list of heads for the given branch
1691 1690
1692 1691 Heads are returned in topological order, from newest to oldest.
1693 1692 If branch is None, use the dirstate branch.
1694 1693 If start is not None, return only heads reachable from start.
1695 1694 If closed is True, return heads that are marked as closed as well.
1696 1695 '''
1697 1696 if branch is None:
1698 1697 branch = self[None].branch()
1699 1698 branches = self.branchmap()
1700 1699 if branch not in branches:
1701 1700 return []
1702 1701 # the cache returns heads ordered lowest to highest
1703 1702 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1704 1703 if start is not None:
1705 1704 # filter out the heads that cannot be reached from startrev
1706 1705 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1707 1706 bheads = [h for h in bheads if h in fbheads]
1708 1707 return bheads
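# Illustrative call (branch name hypothetical): repo.branchheads('stable')
# returns the open head nodes of branch 'stable', newest first; passing
# closed=True additionally includes heads that close the branch.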
1709 1708
1710 1709 def branches(self, nodes):
1711 1710 if not nodes:
1712 1711 nodes = [self.changelog.tip()]
1713 1712 b = []
1714 1713 for n in nodes:
1715 1714 t = n
1716 1715 while True:
1717 1716 p = self.changelog.parents(n)
1718 1717 if p[1] != nullid or p[0] == nullid:
1719 1718 b.append((t, n, p[0], p[1]))
1720 1719 break
1721 1720 n = p[0]
1722 1721 return b
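# Sketch of the traversal above: from each requested node, first parents are
# followed down a linear run until a merge (p[1] != nullid) or the root is
# reached, and each run is reported as the tuple
# (top of run, bottom of run, bottom's p1, bottom's p2).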
1723 1722
1724 1723 def between(self, pairs):
1725 1724 r = []
1726 1725
1727 1726 for top, bottom in pairs:
1728 1727 n, l, i = top, [], 0
1729 1728 f = 1
1730 1729
1731 1730 while n != bottom and n != nullid:
1732 1731 p = self.changelog.parents(n)[0]
1733 1732 if i == f:
1734 1733 l.append(n)
1735 1734 f = f * 2
1736 1735 n = p
1737 1736 i += 1
1738 1737
1739 1738 r.append(l)
1740 1739
1741 1740 return r
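# Worked example of the sampling above (illustrative only): on a linear
# chain of revisions 10 -> 9 -> ... -> 0, between([(node(10), node(0))])
# collects the nodes reached after 1, 2, 4 and 8 first-parent steps from
# the top -- revisions 9, 8, 6 and 2 -- i.e. O(log distance) sample points,
# as used by the legacy discovery wire protocol.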
1742 1741
1743 1742 def checkpush(self, pushop):
1744 1743 """Extensions can override this function if additional checks have
1745 1744 to be performed before pushing, or call it if they override push
1746 1745 command.
1747 1746 """
1748 1747 pass
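# Hedged sketch of an extension override (class and message are
# hypothetical; pushop.force is assumed to exist on exchange.pushoperation):
#
#   def reposetup(ui, repo):
#       class vetopushrepo(repo.__class__):
#           def checkpush(self, pushop):
#               super(vetopushrepo, self).checkpush(pushop)
#               if pushop.force:
#                   raise util.Abort('forced pushes are disabled here')
#       repo.__class__ = vetopushrepo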
1749 1748
1750 1749 @unfilteredpropertycache
1751 1750 def prepushoutgoinghooks(self):
1752 1751 """Return util.hooks consists of "(repo, remote, outgoing)"
1753 1752 functions, which are called before pushing changesets.
1754 1753 """
1755 1754 return util.hooks()
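# Hedged usage sketch, assuming util.hooks exposes add(source, hook):
#
#   def _checkoutgoing(repo, remote, outgoing):
#       pass  # e.g. veto the push by raising util.Abort
#   repo.prepushoutgoinghooks.add('myextension', _checkoutgoing)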
1756 1755
1757 1756 def stream_in(self, remote, requirements):
1758 1757 lock = self.lock()
1759 1758 try:
1760 1759 # Save remote branchmap. We will use it later
1761 1760 # to speed up branchcache creation
1762 1761 rbranchmap = None
1763 1762 if remote.capable("branchmap"):
1764 1763 rbranchmap = remote.branchmap()
1765 1764
1766 1765 fp = remote.stream_out()
1767 1766 l = fp.readline()
1768 1767 try:
1769 1768 resp = int(l)
1770 1769 except ValueError:
1771 1770 raise error.ResponseError(
1772 1771 _('unexpected response from remote server:'), l)
1773 1772 if resp == 1:
1774 1773 raise util.Abort(_('operation forbidden by server'))
1775 1774 elif resp == 2:
1776 1775 raise util.Abort(_('locking the remote repository failed'))
1777 1776 elif resp != 0:
1778 1777 raise util.Abort(_('the server sent an unknown error code'))
1779 1778 self.ui.status(_('streaming all changes\n'))
1780 1779 l = fp.readline()
1781 1780 try:
1782 1781 total_files, total_bytes = map(int, l.split(' ', 1))
1783 1782 except (ValueError, TypeError):
1784 1783 raise error.ResponseError(
1785 1784 _('unexpected response from remote server:'), l)
1786 1785 self.ui.status(_('%d files to transfer, %s of data\n') %
1787 1786 (total_files, util.bytecount(total_bytes)))
1788 1787 handled_bytes = 0
1789 1788 self.ui.progress(_('clone'), 0, total=total_bytes)
1790 1789 start = time.time()
1791 1790
1792 1791 tr = self.transaction(_('clone'))
1793 1792 try:
1794 1793 for i in xrange(total_files):
1795 1794 # XXX doesn't support '\n' or '\r' in filenames
1796 1795 l = fp.readline()
1797 1796 try:
1798 1797 name, size = l.split('\0', 1)
1799 1798 size = int(size)
1800 1799 except (ValueError, TypeError):
1801 1800 raise error.ResponseError(
1802 1801 _('unexpected response from remote server:'), l)
1803 1802 if self.ui.debugflag:
1804 1803 self.ui.debug('adding %s (%s)\n' %
1805 1804 (name, util.bytecount(size)))
1806 1805 # for backwards compat, name was partially encoded
1807 1806 ofp = self.svfs(store.decodedir(name), 'w')
1808 1807 for chunk in util.filechunkiter(fp, limit=size):
1809 1808 handled_bytes += len(chunk)
1810 1809 self.ui.progress(_('clone'), handled_bytes,
1811 1810 total=total_bytes)
1812 1811 ofp.write(chunk)
1813 1812 ofp.close()
1814 1813 tr.close()
1815 1814 finally:
1816 1815 tr.release()
1817 1816
1818 1817 # Writing straight to files circumvented the in-memory caches
1819 1818 self.invalidate()
1820 1819
1821 1820 elapsed = time.time() - start
1822 1821 if elapsed <= 0:
1823 1822 elapsed = 0.001
1824 1823 self.ui.progress(_('clone'), None)
1825 1824 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1826 1825 (util.bytecount(total_bytes), elapsed,
1827 1826 util.bytecount(total_bytes / elapsed)))
1828 1827
1829 1828 # new requirements = old non-format requirements +
1830 1829 # new format-related requirements
1831 1830 # from the streamed-in repository
1832 1831 requirements.update(set(self.requirements) - self.supportedformats)
1833 1832 self._applyrequirements(requirements)
1834 1833 self._writerequirements()
1835 1834
1836 1835 if rbranchmap:
1837 1836 rbheads = []
1838 1837 closed = []
1839 1838 for bheads in rbranchmap.itervalues():
1840 1839 rbheads.extend(bheads)
1841 1840 for h in bheads:
1842 1841 r = self.changelog.rev(h)
1843 1842 b, c = self.changelog.branchinfo(r)
1844 1843 if c:
1845 1844 closed.append(h)
1846 1845
1847 1846 if rbheads:
1848 1847 rtiprev = max((int(self.changelog.rev(node))
1849 1848 for node in rbheads))
1850 1849 cache = branchmap.branchcache(rbranchmap,
1851 1850 self[rtiprev].node(),
1852 1851 rtiprev,
1853 1852 closednodes=closed)
1854 1853 # Try to stick it as low as possible
1855 1854 # filters above 'served' are unlikely to be fetched by a clone
1856 1855 for candidate in ('base', 'immutable', 'served'):
1857 1856 rview = self.filtered(candidate)
1858 1857 if cache.validfor(rview):
1859 1858 self._branchcaches[candidate] = cache
1860 1859 cache.write(rview)
1861 1860 break
1862 1861 self.invalidate()
1863 1862 return len(self.heads()) + 1
1864 1863 finally:
1865 1864 lock.release()
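# Wire format parsed by stream_in above, summarized from the code (the
# server-side stream_out implementation is the authoritative definition):
#   <status>\n                       0 = ok, 1 = forbidden, 2 = lock failed
#   <total_files> <total_bytes>\n
#   then, per file: <store path>\0<size>\n followed by <size> raw bytes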
1866 1865
1867 1866 def clone(self, remote, heads=[], stream=None):
1868 1867 '''clone remote repository.
1869 1868
1870 1869 keyword arguments:
1871 1870 heads: list of revs to clone (forces use of pull)
1872 1871 stream: use streaming clone if possible'''
1873 1872
1874 1873 # now, all clients that can request uncompressed clones can
1875 1874 # read repo formats supported by all servers that can serve
1876 1875 # them.
1877 1876
1878 1877 # if revlog format changes, client will have to check version
1879 1878 # and format flags on "stream" capability, and use
1880 1879 # uncompressed only if compatible.
1881 1880
1882 1881 if stream is None:
1883 1882 # if the server explicitly prefers to stream (for fast LANs)
1884 1883 stream = remote.capable('stream-preferred')
1885 1884
1886 1885 if stream and not heads:
1887 1886 # 'stream' means remote revlog format is revlogv1 only
1888 1887 if remote.capable('stream'):
1889 1888 self.stream_in(remote, set(('revlogv1',)))
1890 1889 else:
1891 1890 # otherwise, 'streamreqs' contains the remote revlog format
1892 1891 streamreqs = remote.capable('streamreqs')
1893 1892 if streamreqs:
1894 1893 streamreqs = set(streamreqs.split(','))
1895 1894 # if we support it, stream in and adjust our requirements
1896 1895 if not streamreqs - self.supportedformats:
1897 1896 self.stream_in(remote, streamreqs)
1898 1897
1899 1898 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1900 1899 try:
1901 1900 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1902 1901 ret = exchange.pull(self, remote, heads).cgresult
1903 1902 finally:
1904 1903 self.ui.restoreconfig(quiet)
1905 1904 return ret
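# Example of the negotiation above (values hypothetical): a server
# advertising streamreqs='revlogv1,generaldelta' is streamed from only when
# {'revlogv1', 'generaldelta'} - self.supportedformats is empty and no
# specific heads were requested; in every case the final exchange.pull()
# still runs, fetching whatever the stream did not carry (e.g. bookmarks).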
1906 1905
1907 1906 def pushkey(self, namespace, key, old, new):
1908 1907 try:
1909 1908 tr = self.currenttransaction()
1910 1909 hookargs = {}
1911 1910 if tr is not None:
1912 1911 hookargs.update(tr.hookargs)
1913 1912 pending = lambda: tr.writepending() and self.root or ""
1914 1913 hookargs['pending'] = pending
1915 1914 hookargs['namespace'] = namespace
1916 1915 hookargs['key'] = key
1917 1916 hookargs['old'] = old
1918 1917 hookargs['new'] = new
1919 1918 self.hook('prepushkey', throw=True, **hookargs)
1920 1919 except error.HookAbort, exc:
1921 1920 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1922 1921 if exc.hint:
1923 1922 self.ui.write_err(_("(%s)\n") % exc.hint)
1924 1923 return False
1925 1924 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1926 1925 ret = pushkey.push(self, namespace, key, old, new)
1927 1926 def runhook():
1928 1927 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1929 1928 ret=ret)
1930 1929 self._afterlock(runhook)
1931 1930 return ret
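# Hedged usage sketch (namespace and key are illustrative): moving a
# bookmark through the generic pushkey layer looks roughly like
#   repo.pushkey('bookmarks', 'feature-x', oldhexnode, newhexnode)
# which returns False when a 'prepushkey' hook aborts the change.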
1932 1931
1933 1932 def listkeys(self, namespace):
1934 1933 self.hook('prelistkeys', throw=True, namespace=namespace)
1935 1934 self.ui.debug('listing keys for "%s"\n' % namespace)
1936 1935 values = pushkey.list(self, namespace)
1937 1936 self.hook('listkeys', namespace=namespace, values=values)
1938 1937 return values
1939 1938
1940 1939 def debugwireargs(self, one, two, three=None, four=None, five=None):
1941 1940 '''used to test argument passing over the wire'''
1942 1941 return "%s %s %s %s %s" % (one, two, three, four, five)
1943 1942
1944 1943 def savecommitmessage(self, text):
1945 1944 fp = self.vfs('last-message.txt', 'wb')
1946 1945 try:
1947 1946 fp.write(text)
1948 1947 finally:
1949 1948 fp.close()
1950 1949 return self.pathto(fp.name[len(self.root) + 1:])
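# Note on the helper above: the message is written to .hg/last-message.txt
# (via self.vfs) and the returned value is that file's path relative to the
# current directory, as used for the "commit message saved" hint in
# commit() above.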
1951 1950
1952 1951 # used to avoid circular references so destructors work
1953 1952 def aftertrans(files):
1954 1953 renamefiles = [tuple(t) for t in files]
1955 1954 def a():
1956 1955 for vfs, src, dest in renamefiles:
1957 1956 try:
1958 1957 vfs.rename(src, dest)
1959 1958 except OSError: # journal file does not yet exist
1960 1959 pass
1961 1960 return a
1962 1961
1963 1962 def undoname(fn):
1964 1963 base, name = os.path.split(fn)
1965 1964 assert name.startswith('journal')
1966 1965 return os.path.join(base, name.replace('journal', 'undo', 1))
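# Worked example (path illustrative):
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'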
1967 1966
1968 1967 def instance(ui, path, create):
1969 1968 return localrepository(ui, util.urllocalpath(path), create)
1970 1969
1971 1970 def islocal(path):
1972 1971 return True