##// END OF EJS Templates
localrepo: eliminate local requirements var in init...
Drew Gottlieb -
r24918:2eac3ae0 default
parent child Browse files
Show More
@@ -1,1971 +1,1972
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # Always operate on the unfiltered repo so all filtered views
        # share a single cached value.
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)
    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)
    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Resolve fname relative to .hg/store (sjoin) rather than .hg/.
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Already unfiltered: compute and cache normally.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Filtered view: fetch (and, via the property, possibly compute)
        # the value on the unfiltered repo instead.
        return getattr(unfi, self.name)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store on the instance itself so each filtered view keeps its
        # own cached value.
        object.__setattr__(obj, self.name, value)
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # Swap the (possibly filtered) receiver for its unfiltered twin
        # before delegating to the original implementation.
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# Capabilities advertised by localpeer (modern API only) and by
# locallegacypeer (modern set plus the deprecated changegroupsubset call).
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        # NOTE(review): 'caps' defaults to a shared module-level set;
        # it is copied by _restrictcapabilities before mutation there,
        # callers must not mutate it in place.
        peer.peerrepository.__init__(self)
        # Expose the 'served' view so the peer API behaves like a remote
        # server would (hidden/filtered changesets stay invisible).
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # A local peer gives direct access to the underlying repository.
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # Re-raise the original failure after flushing the output.
                raise
        except error.PushRaced, exc:
            # Translate a push race into the error the wire protocol
            # would report for the same situation.
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Advertise the legacy capability set (adds 'changegroupsubset').
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        # Legacy discovery call; delegate straight to the repository.
        return self._repo.branches(nodes)

    def between(self, pairs):
        # Legacy discovery call; delegate straight to the repository.
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        # Build a changegroup the pre-bundle2 way.
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        # Build a partial changegroup the pre-bundle2 way.
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
class localrepository(object):
    """A local, on-disk Mercurial repository."""

    # repository format features this code knows how to create/write
    supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
    # everything accepted when reading an existing repo: the write
    # features plus storage-layout-only requirements
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements forwarded to the store vfs as revlog opener options
    openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
    # name of the repoview filter in effect; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
204 204
    def _baserequirements(self, create):
        # Requirements every new repository starts with; 'create' is part
        # of the interface but does not affect the result here.
        return ['revlogv1']
207 207
208 208 def __init__(self, baseui, path=None, create=False):
209 self.requirements = set()
209 210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 211 self.wopener = self.wvfs
211 212 self.root = self.wvfs.base
212 213 self.path = self.wvfs.join(".hg")
213 214 self.origroot = path
214 215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 216 self.vfs = scmutil.vfs(self.path)
216 217 self.opener = self.vfs
217 218 self.baseui = baseui
218 219 self.ui = baseui.copy()
219 220 self.ui.copy = baseui.copy # prevent copying repo configuration
220 221 # A list of callback to shape the phase if no data were found.
221 222 # Callback are in the form: func(repo, roots) --> processed root.
222 223 # This list it to be filled by extension during repo setup
223 224 self._phasedefaults = []
224 225 try:
225 226 self.ui.readconfig(self.join("hgrc"), self.root)
226 227 extensions.loadall(self.ui)
227 228 except IOError:
228 229 pass
229 230
230 231 if self.featuresetupfuncs:
231 232 self.supported = set(self._basesupported) # use private copy
232 233 extmods = set(m.__name__ for n, m
233 234 in extensions.extensions(self.ui))
234 235 for setupfunc in self.featuresetupfuncs:
235 236 if setupfunc.__module__ in extmods:
236 237 setupfunc(self.ui, self.supported)
237 238 else:
238 239 self.supported = self._basesupported
239 240
240 241 if not self.vfs.isdir():
241 242 if create:
242 243 if not self.wvfs.exists():
243 244 self.wvfs.makedirs()
244 245 self.vfs.makedir(notindexed=True)
245 requirements = set(self._baserequirements(create))
246 self.requirements.update(self._baserequirements(create))
246 247 if self.ui.configbool('format', 'usestore', True):
247 248 self.vfs.mkdir("store")
248 requirements.add("store")
249 self.requirements.add("store")
249 250 if self.ui.configbool('format', 'usefncache', True):
250 requirements.add("fncache")
251 self.requirements.add("fncache")
251 252 if self.ui.configbool('format', 'dotencode', True):
252 requirements.add('dotencode')
253 self.requirements.add('dotencode')
253 254 # create an invalid changelog
254 255 self.vfs.append(
255 256 "00changelog.i",
256 257 '\0\0\0\2' # represents revlogv2
257 258 ' dummy changelog to prevent using the old repo layout'
258 259 )
259 260 if self.ui.configbool('format', 'generaldelta', False):
260 requirements.add("generaldelta")
261 self.requirements.add("generaldelta")
261 262 if self.ui.configbool('experimental', 'manifestv2', False):
262 requirements.add("manifestv2")
263 self.requirements.add("manifestv2")
263 264 else:
264 265 raise error.RepoError(_("repository %s not found") % path)
265 266 elif create:
266 267 raise error.RepoError(_("repository %s already exists") % path)
267 268 else:
268 269 try:
269 requirements = scmutil.readrequires(self.vfs, self.supported)
270 self.requirements = scmutil.readrequires(
271 self.vfs, self.supported)
270 272 except IOError, inst:
271 273 if inst.errno != errno.ENOENT:
272 274 raise
273 requirements = set()
274 275
275 276 self.sharedpath = self.path
276 277 try:
277 278 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
278 279 realpath=True)
279 280 s = vfs.base
280 281 if not vfs.exists():
281 282 raise error.RepoError(
282 283 _('.hg/sharedpath points to nonexistent directory %s') % s)
283 284 self.sharedpath = s
284 285 except IOError, inst:
285 286 if inst.errno != errno.ENOENT:
286 287 raise
287 288
288 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
289 self.store = store.store(
290 self.requirements, self.sharedpath, scmutil.vfs)
289 291 self.spath = self.store.path
290 292 self.svfs = self.store.vfs
291 293 self.sopener = self.svfs
292 294 self.sjoin = self.store.join
293 295 self.vfs.createmode = self.store.createmode
294 self.requirements = requirements
295 296 self._applyopenerreqs()
296 297 if create:
297 298 self._writerequirements()
298 299
299 300
300 301 self._branchcaches = {}
301 302 self._revbranchcache = None
302 303 self.filterpats = {}
303 304 self._datafilters = {}
304 305 self._transref = self._lockref = self._wlockref = None
305 306
306 307 # A cache for various files under .hg/ that tracks file changes,
307 308 # (used by the filecache decorator)
308 309 #
309 310 # Maps a property name to its util.filecacheentry
310 311 self._filecache = {}
311 312
312 313 # hold sets of revision to be filtered
313 314 # should be cleared when something might have changed the filter value:
314 315 # - new changesets,
315 316 # - phase change,
316 317 # - new obsolescence marker,
317 318 # - working directory parent change,
318 319 # - bookmark changes
319 320 self.filteredrevcache = {}
320 321
321 322 # generic mapping between names and nodes
322 323 self.names = namespaces.namespaces()
323 324
    def close(self):
        # Flush write-back caches before the repository object goes away.
        self._writecaches()
326 327
    def _writecaches(self):
        # Only write the rev-branch cache if it was ever instantiated.
        if self._revbranchcache:
            self._revbranchcache.write()
330 331
331 332 def _restrictcapabilities(self, caps):
332 333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 334 caps = set(caps)
334 335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 336 caps.add('bundle2=' + urllib.quote(capsblob))
336 337 return caps
337 338
338 339 def _applyopenerreqs(self):
339 340 self.svfs.options = dict((r, 1) for r in self.requirements
340 341 if r in self.openerreqs)
341 342 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
342 343 if chunkcachesize is not None:
343 344 self.svfs.options['chunkcachesize'] = chunkcachesize
344 345 maxchainlen = self.ui.configint('format', 'maxchainlen')
345 346 if maxchainlen is not None:
346 347 self.svfs.options['maxchainlen'] = maxchainlen
347 348 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
348 349 if manifestcachesize is not None:
349 350 self.svfs.options['manifestcachesize'] = manifestcachesize
350 351 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
351 352 if usetreemanifest is not None:
352 353 self.svfs.options['usetreemanifest'] = usetreemanifest
353 354
354 355 def _writerequirements(self):
355 356 reqfile = self.vfs("requires", "w")
356 357 for r in sorted(self.requirements):
357 358 reqfile.write("%s\n" % r)
358 359 reqfile.close()
359 360
360 361 def _checknested(self, path):
361 362 """Determine if path is a legal nested repository."""
362 363 if not path.startswith(self.root):
363 364 return False
364 365 subpath = path[len(self.root) + 1:]
365 366 normsubpath = util.pconvert(subpath)
366 367
367 368 # XXX: Checking against the current working copy is wrong in
368 369 # the sense that it can reject things like
369 370 #
370 371 # $ hg cat -r 10 sub/x.txt
371 372 #
372 373 # if sub/ is no longer a subrepository in the working copy
373 374 # parent revision.
374 375 #
375 376 # However, it can of course also allow things that would have
376 377 # been rejected before, such as the above cat command if sub/
377 378 # is a subrepository now, but was a normal directory before.
378 379 # The old path auditor would have rejected by mistake since it
379 380 # panics when it sees sub/.hg/.
380 381 #
381 382 # All in all, checking against the working copy seems sensible
382 383 # since we want to prevent access to nested repositories on
383 384 # the filesystem *now*.
384 385 ctx = self[None]
385 386 parts = util.splitpath(subpath)
386 387 while parts:
387 388 prefix = '/'.join(parts)
388 389 if prefix in ctx.substate:
389 390 if prefix == normsubpath:
390 391 return True
391 392 else:
392 393 sub = ctx.sub(prefix)
393 394 return sub.checknested(subpath[len(prefix) + 1:])
394 395 else:
395 396 parts.pop()
396 397 return False
397 398
    def peer(self):
        # not cached to avoid reference cycle
        return localpeer(self)
400 401
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
406 407
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
414 415
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # Lazily-loaded bookmark store; the filecache decorator
        # invalidates it when .hg/bookmarks changes on disk.
        return bookmarks.bmstore(self)
418 419
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # Name of the active bookmark, tracked via .hg/bookmarks.current.
        return bookmarks.readcurrent(self)
422 423
423 424 def bookmarkheads(self, bookmark):
424 425 name = bookmark.split('@', 1)[0]
425 426 heads = []
426 427 for mark, n in self._bookmarks.iteritems():
427 428 if mark.split('@', 1)[0] == name:
428 429 heads.append(n)
429 430 return heads
430 431
    @storecache('phaseroots')
    def _phasecache(self):
        # Phase information, invalidated when .hg/store/phaseroots changes.
        return phases.phasecache(self, self._phasedefaults)
434 435
    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # The store is read-only unless marker creation is enabled.
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # Markers exist but the feature is off: warn, since they
            # will not be exchanged or updated.
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
451 452
452 453 @storecache('00changelog.i')
453 454 def changelog(self):
454 455 c = changelog.changelog(self.svfs)
455 456 if 'HG_PENDING' in os.environ:
456 457 p = os.environ['HG_PENDING']
457 458 if p.startswith(self.root):
458 459 c.readpending('00changelog.i.a')
459 460 return c
460 461
    @storecache('00manifest.i')
    def manifest(self):
        # The manifest revlog, invalidated when 00manifest.i changes.
        return manifest.manifest(self.svfs)
464 465
    @repofilecache('dirstate')
    def dirstate(self):
        # 'warned' is a one-element list so the closure can mutate it
        # (Python 2 has no 'nonlocal'); it ensures the warning below is
        # only printed once per dirstate instance.
        warned = [0]
        def validate(node):
            # Map dirstate parents that are unknown to the changelog to
            # nullid instead of crashing (e.g. after a strip).
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480 481
    def __getitem__(self, changeid):
        """Return the context for changeid: None -> working directory,
        a slice -> list of changectxs, anything else -> one changectx."""
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # Slices address revision numbers; filtered revisions are
            # silently skipped.
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
489 490
490 491 def __contains__(self, changeid):
491 492 try:
492 493 self[changeid]
493 494 return True
494 495 except error.RepoLookupError:
495 496 return False
496 497
    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True
499 500
    def __len__(self):
        # Number of revisions (delegates to the changelog).
        return len(self.changelog)
502 503
    def __iter__(self):
        # Iterate over revision numbers, in changelog order.
        return iter(self.changelog)
505 506
506 507 def revs(self, expr, *args):
507 508 '''Return a list of revisions matching the given revset'''
508 509 expr = revset.formatspec(expr, *args)
509 510 m = revset.match(None, expr)
510 511 return m(self)
511 512
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        # Generator: the revset is only evaluated on first iteration.
        for r in self.revs(expr, *args):
            yield self[r]
519 520
    def url(self):
        # Local repositories are addressed with a plain 'file:' URL.
        return 'file:' + self.root
522 523
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        With throw=True a failing hook raises instead of returning a
        failure status.
        """
        return hook.hook(self.ui, self, name, throw, **args)
531 532
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        # Low-level tagging: write tag entries to localtags/.hgtags and,
        # for non-local tags, commit the change. Callers should use tag(),
        # which validates the working copy first.
        # NOTE(review): 'extra={}' is a shared mutable default; it is only
        # passed through to commit() and never mutated here.
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append one line per tag, first making sure the existing
            # content ends with a newline.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # Moving an existing tag: record its old node first so
                    # the file keeps the full history of the tag.
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # Commit only the .hgtags change, regardless of other dirty files.
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
605 606
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # Refuse to auto-commit on top of a dirty .hgtags.
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
635 636
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by nodetags()/tagslist().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
658 659
659 660 def tags(self):
660 661 '''return a mapping of tag to node'''
661 662 t = {}
662 663 if self.changelog.filteredrevs:
663 664 tags, tt = self._findtags()
664 665 else:
665 666 tags = self._tagscache.tags
666 667 for k, v in tags.iteritems():
667 668 try:
668 669 # ignore tags to unknown nodes
669 670 self.changelog.rev(v)
670 671 t[k] = v
671 672 except (error.LookupError, ValueError):
672 673 pass
673 674 return t
674 675
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        # The tags module mutates alltags/tagtypes in place.
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
707 708
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
718 719
719 720 def tagslist(self):
720 721 '''return a list of tags ordered by revision'''
721 722 if not self._tagscache.tagslist:
722 723 l = []
723 724 for t, n in self.tags().iteritems():
724 725 l.append((self.changelog.rev(n), t, n))
725 726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 727
727 728 return self._tagscache.tagslist
728 729
729 730 def nodetags(self, node):
730 731 '''return the tags associated with a node'''
731 732 if not self._tagscache.nodetagscache:
732 733 nodetagscache = {}
733 734 for t, n in self._tagscache.tags.iteritems():
734 735 nodetagscache.setdefault(n, []).append(t)
735 736 for tags in nodetagscache.itervalues():
736 737 tags.sort()
737 738 self._tagscache.nodetagscache = nodetagscache
738 739 return self._tagscache.nodetagscache.get(node, [])
739 740
740 741 def nodebookmarks(self, node):
741 742 marks = []
742 743 for bookmark, n in self._bookmarks.iteritems():
743 744 if n == node:
744 745 marks.append(bookmark)
745 746 return sorted(marks)
746 747
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # Ensure the cache for the current filter level is up to date.
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
752 753
    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev->branch cache; it is shared across all
        # filtered views (hence built on the unfiltered repo).
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
758 759
759 760 def branchtip(self, branch, ignoremissing=False):
760 761 '''return the tip node for a given branch
761 762
762 763 If ignoremissing is True, then this method will not raise an error.
763 764 This is helpful for callers that only expect None for a missing branch
764 765 (e.g. namespace).
765 766
766 767 '''
767 768 try:
768 769 return self.branchmap().branchtip(branch)
769 770 except KeyError:
770 771 if not ignoremissing:
771 772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 773 else:
773 774 pass
774 775
    def lookup(self, key):
        # Resolve any changeset identifier to its binary node.
        return self[key].node()
777 778
778 779 def lookupbranch(self, key, remote=None):
779 780 repo = remote or self
780 781 if key in repo.branchmap():
781 782 return key
782 783
783 784 repo = (remote and remote.local()) and remote or self
784 785 return repo[key].branch()
785 786
786 787 def known(self, nodes):
787 788 nm = self.changelog.nodemap
788 789 pc = self._phasecache
789 790 result = []
790 791 for n in nodes:
791 792 r = nm.get(n)
792 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 794 result.append(resp)
794 795 return result
795 796
    def local(self):
        # A local repository is its own local view.
        return self
798 799
    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
807 808
808 809 def shared(self):
809 810 '''the type of shared repository (None if not shared)'''
810 811 if self.sharedpath != self.path:
811 812 return 'store'
812 813 return None
813 814
    def join(self, f, *insidef):
        # Join f (and optional subpaths) under .hg/.
        return self.vfs.join(os.path.join(f, *insidef))
816 817
    def wjoin(self, f, *insidef):
        # Join f (and optional subpaths) under the working directory root.
        return self.vfs.reljoin(self.root, f, *insidef)
819 820
    def file(self, f):
        # Return the filelog for tracked file f; a single leading '/' is
        # tolerated and stripped.
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
824 825
    def changectx(self, changeid):
        # Explicit-name alias for repo[changeid].
        return self[changeid]
827 828
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None means the working directory.
        return self[changeid].parents()
831 832
    def setparents(self, p1, p2=nullid):
        # Set the working directory parents, fixing up dirstate copy
        # records that the dirstate itself cannot adjust (it has no
        # access to parent manifests).
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    # Drop copy records whose endpoints are both absent
                    # from the (single) parent.
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
848 849
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
853 854
    def getcwd(self):
        # Current directory, expressed relative to the repo root.
        return self.dirstate.getcwd()
856 857
    def pathto(self, f, cwd=None):
        # Express repo-relative path f relative to cwd.
        return self.dirstate.pathto(f, cwd)
859 860
    def wfile(self, f, mode='r'):
        # Open file f in the working directory.
        return self.wvfs(f, mode)
862 863
    def _link(self, f):
        # True if working-directory file f is a symlink.
        return self.wvfs.islink(f)
865 866
    def _loadfilter(self, filter):
        # Build (and cache) the list of (matcher, filterfn, params)
        # triples for the given config section ('encode' or 'decode').
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern.
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # Registered in-process filter; the remainder of
                        # cmd becomes its parameter string.
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # Fall back to running cmd as a shell filter.
                    # The default argument binds util.filter now, avoiding
                    # the late-binding closure pitfall.
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
889 890
890 891 def _filter(self, filterpats, filename, data):
891 892 for mf, fn, cmd in filterpats:
892 893 if mf(filename):
893 894 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
894 895 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
895 896 break
896 897
897 898 return data
898 899
899 900 @unfilteredpropertycache
900 901 def _encodefilterpats(self):
901 902 return self._loadfilter('encode')
902 903
903 904 @unfilteredpropertycache
904 905 def _decodefilterpats(self):
905 906 return self._loadfilter('decode')
906 907
907 908 def adddatafilter(self, name, filter):
908 909 self._datafilters[name] = filter
909 910
910 911 def wread(self, filename):
911 912 if self._link(filename):
912 913 data = self.wvfs.readlink(filename)
913 914 else:
914 915 data = self.wvfs.read(filename)
915 916 return self._filter(self._encodefilterpats, filename, data)
916 917
917 918 def wwrite(self, filename, data, flags):
918 919 """write ``data`` into ``filename`` in the working directory
919 920
920 921 This returns length of written (maybe decoded) data.
921 922 """
922 923 data = self._filter(self._decodefilterpats, filename, data)
923 924 if 'l' in flags:
924 925 self.wvfs.symlink(data, filename)
925 926 else:
926 927 self.wvfs.write(filename, data)
927 928 if 'x' in flags:
928 929 self.wvfs.setflags(filename, False, True)
929 930 return len(data)
930 931
931 932 def wwritedata(self, filename, data):
932 933 return self._filter(self._decodefilterpats, filename, data)
933 934
934 935 def currenttransaction(self):
935 936 """return the current transaction or None if non exists"""
936 937 if self._transref:
937 938 tr = self._transref()
938 939 else:
939 940 tr = None
940 941
941 942 if tr and tr.running():
942 943 return tr
943 944 return None
944 945
945 946 def transaction(self, desc, report=None):
946 947 if (self.ui.configbool('devel', 'all')
947 948 or self.ui.configbool('devel', 'check-locks')):
948 949 l = self._lockref and self._lockref()
949 950 if l is None or not l.held:
950 951 scmutil.develwarn(self.ui, 'transaction with no lock')
951 952 tr = self.currenttransaction()
952 953 if tr is not None:
953 954 return tr.nest()
954 955
955 956 # abort here if the journal already exists
956 957 if self.svfs.exists("journal"):
957 958 raise error.RepoError(
958 959 _("abandoned transaction found"),
959 960 hint=_("run 'hg recover' to clean up transaction"))
960 961
961 962 self.hook('pretxnopen', throw=True, txnname=desc)
962 963
963 964 self._writejournal(desc)
964 965 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
965 966 if report:
966 967 rp = report
967 968 else:
968 969 rp = self.ui.warn
969 970 vfsmap = {'plain': self.vfs} # root of .hg/
970 971 # we must avoid cyclic reference between repo and transaction.
971 972 reporef = weakref.ref(self)
972 973 def validate(tr):
973 974 """will run pre-closing hooks"""
974 975 pending = lambda: tr.writepending() and self.root or ""
975 976 reporef().hook('pretxnclose', throw=True, pending=pending,
976 977 xnname=desc, **tr.hookargs)
977 978
978 979 tr = transaction.transaction(rp, self.sopener, vfsmap,
979 980 "journal",
980 981 "undo",
981 982 aftertrans(renames),
982 983 self.store.createmode,
983 984 validator=validate)
984 985
985 986 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
986 987 tr.hookargs['TXNID'] = trid
987 988 # note: writing the fncache only during finalize mean that the file is
988 989 # outdated when running hooks. As fncache is used for streaming clone,
989 990 # this is not expected to break anything that happen during the hooks.
990 991 tr.addfinalize('flush-fncache', self.store.write)
991 992 def txnclosehook(tr2):
992 993 """To be run if transaction is successful, will schedule a hook run
993 994 """
994 995 def hook():
995 996 reporef().hook('txnclose', throw=False, txnname=desc,
996 997 **tr2.hookargs)
997 998 reporef()._afterlock(hook)
998 999 tr.addfinalize('txnclose-hook', txnclosehook)
999 1000 def txnaborthook(tr2):
1000 1001 """To be run if transaction is aborted
1001 1002 """
1002 1003 reporef().hook('txnabort', throw=False, txnname=desc,
1003 1004 **tr2.hookargs)
1004 1005 tr.addabort('txnabort-hook', txnaborthook)
1005 1006 self._transref = weakref.ref(tr)
1006 1007 return tr
1007 1008
1008 1009 def _journalfiles(self):
1009 1010 return ((self.svfs, 'journal'),
1010 1011 (self.vfs, 'journal.dirstate'),
1011 1012 (self.vfs, 'journal.branch'),
1012 1013 (self.vfs, 'journal.desc'),
1013 1014 (self.vfs, 'journal.bookmarks'),
1014 1015 (self.svfs, 'journal.phaseroots'))
1015 1016
1016 1017 def undofiles(self):
1017 1018 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1018 1019
1019 1020 def _writejournal(self, desc):
1020 1021 self.vfs.write("journal.dirstate",
1021 1022 self.vfs.tryread("dirstate"))
1022 1023 self.vfs.write("journal.branch",
1023 1024 encoding.fromlocal(self.dirstate.branch()))
1024 1025 self.vfs.write("journal.desc",
1025 1026 "%d\n%s\n" % (len(self), desc))
1026 1027 self.vfs.write("journal.bookmarks",
1027 1028 self.vfs.tryread("bookmarks"))
1028 1029 self.svfs.write("journal.phaseroots",
1029 1030 self.svfs.tryread("phaseroots"))
1030 1031
1031 1032 def recover(self):
1032 1033 lock = self.lock()
1033 1034 try:
1034 1035 if self.svfs.exists("journal"):
1035 1036 self.ui.status(_("rolling back interrupted transaction\n"))
1036 1037 vfsmap = {'': self.svfs,
1037 1038 'plain': self.vfs,}
1038 1039 transaction.rollback(self.svfs, vfsmap, "journal",
1039 1040 self.ui.warn)
1040 1041 self.invalidate()
1041 1042 return True
1042 1043 else:
1043 1044 self.ui.warn(_("no interrupted transaction available\n"))
1044 1045 return False
1045 1046 finally:
1046 1047 lock.release()
1047 1048
1048 1049 def rollback(self, dryrun=False, force=False):
1049 1050 wlock = lock = None
1050 1051 try:
1051 1052 wlock = self.wlock()
1052 1053 lock = self.lock()
1053 1054 if self.svfs.exists("undo"):
1054 1055 return self._rollback(dryrun, force)
1055 1056 else:
1056 1057 self.ui.warn(_("no rollback information available\n"))
1057 1058 return 1
1058 1059 finally:
1059 1060 release(lock, wlock)
1060 1061
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Undo the last transaction recorded in the undo files.

        dryrun: only report what would be rolled back.
        force: allow rolling back a 'commit' transaction even when the
        working directory is not checked out at tip.

        Returns 0 (including for dry runs). Raises util.Abort when a
        non-forced rollback of a commit could lose data.
        """
        ui = self.ui
        try:
            # undo.desc format: "<repo length before txn>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc file: transaction metadata is unavailable
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # restore the dirstate only if a dirstate parent was stripped
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        ms = mergemod.mergestate(self)
        ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1130 1131
1131 1132 def invalidatecaches(self):
1132 1133
1133 1134 if '_tagscache' in vars(self):
1134 1135 # can't use delattr on proxy
1135 1136 del self.__dict__['_tagscache']
1136 1137
1137 1138 self.unfiltered()._branchcaches.clear()
1138 1139 self.invalidatevolatilesets()
1139 1140
1140 1141 def invalidatevolatilesets(self):
1141 1142 self.filteredrevcache.clear()
1142 1143 obsolete.clearobscaches(self)
1143 1144
1144 1145 def invalidatedirstate(self):
1145 1146 '''Invalidates the dirstate, causing the next call to dirstate
1146 1147 to check if it was modified since the last time it was read,
1147 1148 rereading it if it has.
1148 1149
1149 1150 This is different to dirstate.invalidate() that it doesn't always
1150 1151 rereads the dirstate. Use dirstate.invalidate() if you want to
1151 1152 explicitly read the dirstate again (i.e. restoring it to a previous
1152 1153 known good state).'''
1153 1154 if hasunfilteredcache(self, 'dirstate'):
1154 1155 for k in self.dirstate._filecache:
1155 1156 try:
1156 1157 delattr(self.dirstate, k)
1157 1158 except AttributeError:
1158 1159 pass
1159 1160 delattr(self.unfiltered(), 'dirstate')
1160 1161
1161 1162 def invalidate(self):
1162 1163 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1163 1164 for k in self._filecache:
1164 1165 # dirstate is invalidated separately in invalidatedirstate()
1165 1166 if k == 'dirstate':
1166 1167 continue
1167 1168
1168 1169 try:
1169 1170 delattr(unfiltered, k)
1170 1171 except AttributeError:
1171 1172 pass
1172 1173 self.invalidatecaches()
1173 1174 self.store.invalidatecaches()
1174 1175
1175 1176 def invalidateall(self):
1176 1177 '''Fully invalidates both store and non-store parts, causing the
1177 1178 subsequent operation to reread any outside changes.'''
1178 1179 # extension should hook this to invalidate its caches
1179 1180 self.invalidate()
1180 1181 self.invalidatedirstate()
1181 1182
1182 1183 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1183 1184 try:
1184 1185 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1185 1186 except error.LockHeld, inst:
1186 1187 if not wait:
1187 1188 raise
1188 1189 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1189 1190 (desc, inst.locker))
1190 1191 # default to 600 seconds timeout
1191 1192 l = lockmod.lock(vfs, lockname,
1192 1193 int(self.ui.config("ui", "timeout", "600")),
1193 1194 releasefn, desc=desc)
1194 1195 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1195 1196 if acquirefn:
1196 1197 acquirefn()
1197 1198 return l
1198 1199
1199 1200 def _afterlock(self, callback):
1200 1201 """add a callback to be run when the repository is fully unlocked
1201 1202
1202 1203 The callback will be executed when the outermost lock is released
1203 1204 (with wlock being higher level than 'lock')."""
1204 1205 for ref in (self._wlockref, self._lockref):
1205 1206 l = ref and ref()
1206 1207 if l and l.held:
1207 1208 l.postrelease.append(callback)
1208 1209 break
1209 1210 else: # no lock have been found.
1210 1211 callback()
1211 1212
1212 1213 def lock(self, wait=True):
1213 1214 '''Lock the repository store (.hg/store) and return a weak reference
1214 1215 to the lock. Use this before modifying the store (e.g. committing or
1215 1216 stripping). If you are opening a transaction, get a lock as well.)
1216 1217
1217 1218 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1218 1219 'wlock' first to avoid a dead-lock hazard.'''
1219 1220 l = self._lockref and self._lockref()
1220 1221 if l is not None and l.held:
1221 1222 l.lock()
1222 1223 return l
1223 1224
1224 1225 def unlock():
1225 1226 for k, ce in self._filecache.items():
1226 1227 if k == 'dirstate' or k not in self.__dict__:
1227 1228 continue
1228 1229 ce.refresh()
1229 1230
1230 1231 l = self._lock(self.svfs, "lock", wait, unlock,
1231 1232 self.invalidate, _('repository %s') % self.origroot)
1232 1233 self._lockref = weakref.ref(l)
1233 1234 return l
1234 1235
1235 1236 def wlock(self, wait=True):
1236 1237 '''Lock the non-store parts of the repository (everything under
1237 1238 .hg except .hg/store) and return a weak reference to the lock.
1238 1239
1239 1240 Use this before modifying files in .hg.
1240 1241
1241 1242 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1242 1243 'wlock' first to avoid a dead-lock hazard.'''
1243 1244 l = self._wlockref and self._wlockref()
1244 1245 if l is not None and l.held:
1245 1246 l.lock()
1246 1247 return l
1247 1248
1248 1249 # We do not need to check for non-waiting lock aquisition. Such
1249 1250 # acquisition would not cause dead-lock as they would just fail.
1250 1251 if wait and (self.ui.configbool('devel', 'all')
1251 1252 or self.ui.configbool('devel', 'check-locks')):
1252 1253 l = self._lockref and self._lockref()
1253 1254 if l is not None and l.held:
1254 1255 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1255 1256
1256 1257 def unlock():
1257 1258 if self.dirstate.pendingparentchange():
1258 1259 self.dirstate.invalidate()
1259 1260 else:
1260 1261 self.dirstate.write()
1261 1262
1262 1263 self._filecache['dirstate'].refresh()
1263 1264
1264 1265 l = self._lock(self.vfs, "wlock", wait, unlock,
1265 1266 self.invalidatedirstate, _('working directory of %s') %
1266 1267 self.origroot)
1267 1268 self._wlockref = weakref.ref(l)
1268 1269 return l
1269 1270
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context being committed
        manifest1, manifest2: the manifests of the commit's parents
        linkrev: revision the new filelog entry will link to
        tr: the active transaction
        changelist: list that changed filenames are appended to

        Returns the filelog node to record in the new manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        # an existing filelog entry matching a parent can be reused as-is
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1355 1356
1356 1357 @unfilteredmethod
1357 1358 def commit(self, text="", user=None, date=None, match=None, force=False,
1358 1359 editor=False, extra={}):
1359 1360 """Add a new revision to current repository.
1360 1361
1361 1362 Revision information is gathered from the working directory,
1362 1363 match can be used to filter the committed files. If editor is
1363 1364 supplied, it is called to get a commit message.
1364 1365 """
1365 1366
1366 1367 def fail(f, msg):
1367 1368 raise util.Abort('%s: %s' % (f, msg))
1368 1369
1369 1370 if not match:
1370 1371 match = matchmod.always(self.root, '')
1371 1372
1372 1373 if not force:
1373 1374 vdirs = []
1374 1375 match.explicitdir = vdirs.append
1375 1376 match.bad = fail
1376 1377
1377 1378 wlock = self.wlock()
1378 1379 try:
1379 1380 wctx = self[None]
1380 1381 merge = len(wctx.parents()) > 1
1381 1382
1382 1383 if not force and merge and not match.always():
1383 1384 raise util.Abort(_('cannot partially commit a merge '
1384 1385 '(do not specify files or patterns)'))
1385 1386
1386 1387 status = self.status(match=match, clean=force)
1387 1388 if force:
1388 1389 status.modified.extend(status.clean) # mq may commit clean files
1389 1390
1390 1391 # check subrepos
1391 1392 subs = []
1392 1393 commitsubs = set()
1393 1394 newstate = wctx.substate.copy()
1394 1395 # only manage subrepos and .hgsubstate if .hgsub is present
1395 1396 if '.hgsub' in wctx:
1396 1397 # we'll decide whether to track this ourselves, thanks
1397 1398 for c in status.modified, status.added, status.removed:
1398 1399 if '.hgsubstate' in c:
1399 1400 c.remove('.hgsubstate')
1400 1401
1401 1402 # compare current state to last committed state
1402 1403 # build new substate based on last committed state
1403 1404 oldstate = wctx.p1().substate
1404 1405 for s in sorted(newstate.keys()):
1405 1406 if not match(s):
1406 1407 # ignore working copy, use old state if present
1407 1408 if s in oldstate:
1408 1409 newstate[s] = oldstate[s]
1409 1410 continue
1410 1411 if not force:
1411 1412 raise util.Abort(
1412 1413 _("commit with new subrepo %s excluded") % s)
1413 1414 dirtyreason = wctx.sub(s).dirtyreason(True)
1414 1415 if dirtyreason:
1415 1416 if not self.ui.configbool('ui', 'commitsubrepos'):
1416 1417 raise util.Abort(dirtyreason,
1417 1418 hint=_("use --subrepos for recursive commit"))
1418 1419 subs.append(s)
1419 1420 commitsubs.add(s)
1420 1421 else:
1421 1422 bs = wctx.sub(s).basestate()
1422 1423 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1423 1424 if oldstate.get(s, (None, None, None))[1] != bs:
1424 1425 subs.append(s)
1425 1426
1426 1427 # check for removed subrepos
1427 1428 for p in wctx.parents():
1428 1429 r = [s for s in p.substate if s not in newstate]
1429 1430 subs += [s for s in r if match(s)]
1430 1431 if subs:
1431 1432 if (not match('.hgsub') and
1432 1433 '.hgsub' in (wctx.modified() + wctx.added())):
1433 1434 raise util.Abort(
1434 1435 _("can't commit subrepos without .hgsub"))
1435 1436 status.modified.insert(0, '.hgsubstate')
1436 1437
1437 1438 elif '.hgsub' in status.removed:
1438 1439 # clean up .hgsubstate when .hgsub is removed
1439 1440 if ('.hgsubstate' in wctx and
1440 1441 '.hgsubstate' not in (status.modified + status.added +
1441 1442 status.removed)):
1442 1443 status.removed.insert(0, '.hgsubstate')
1443 1444
1444 1445 # make sure all explicit patterns are matched
1445 1446 if not force and match.files():
1446 1447 matched = set(status.modified + status.added + status.removed)
1447 1448
1448 1449 for f in match.files():
1449 1450 f = self.dirstate.normalize(f)
1450 1451 if f == '.' or f in matched or f in wctx.substate:
1451 1452 continue
1452 1453 if f in status.deleted:
1453 1454 fail(f, _('file not found!'))
1454 1455 if f in vdirs: # visited directory
1455 1456 d = f + '/'
1456 1457 for mf in matched:
1457 1458 if mf.startswith(d):
1458 1459 break
1459 1460 else:
1460 1461 fail(f, _("no match under directory!"))
1461 1462 elif f not in self.dirstate:
1462 1463 fail(f, _("file not tracked!"))
1463 1464
1464 1465 cctx = context.workingcommitctx(self, status,
1465 1466 text, user, date, extra)
1466 1467
1467 1468 if (not force and not extra.get("close") and not merge
1468 1469 and not cctx.files()
1469 1470 and wctx.branch() == wctx.p1().branch()):
1470 1471 return None
1471 1472
1472 1473 if merge and cctx.deleted():
1473 1474 raise util.Abort(_("cannot commit merge with missing files"))
1474 1475
1475 1476 ms = mergemod.mergestate(self)
1476 1477 for f in status.modified:
1477 1478 if f in ms and ms[f] == 'u':
1478 1479 raise util.Abort(_('unresolved merge conflicts '
1479 1480 '(see "hg help resolve")'))
1480 1481
1481 1482 if editor:
1482 1483 cctx._text = editor(self, cctx, subs)
1483 1484 edited = (text != cctx._text)
1484 1485
1485 1486 # Save commit message in case this transaction gets rolled back
1486 1487 # (e.g. by a pretxncommit hook). Leave the content alone on
1487 1488 # the assumption that the user will use the same editor again.
1488 1489 msgfn = self.savecommitmessage(cctx._text)
1489 1490
1490 1491 # commit subs and write new state
1491 1492 if subs:
1492 1493 for s in sorted(commitsubs):
1493 1494 sub = wctx.sub(s)
1494 1495 self.ui.status(_('committing subrepository %s\n') %
1495 1496 subrepo.subrelpath(sub))
1496 1497 sr = sub.commit(cctx._text, user, date)
1497 1498 newstate[s] = (newstate[s][0], sr)
1498 1499 subrepo.writestate(self, newstate)
1499 1500
1500 1501 p1, p2 = self.dirstate.parents()
1501 1502 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1502 1503 try:
1503 1504 self.hook("precommit", throw=True, parent1=hookp1,
1504 1505 parent2=hookp2)
1505 1506 ret = self.commitctx(cctx, True)
1506 1507 except: # re-raises
1507 1508 if edited:
1508 1509 self.ui.write(
1509 1510 _('note: commit message saved in %s\n') % msgfn)
1510 1511 raise
1511 1512
1512 1513 # update bookmarks, dirstate and mergestate
1513 1514 bookmarks.update(self, [p1, p2], ret)
1514 1515 cctx.markcommitted(ret)
1515 1516 ms.reset()
1516 1517 finally:
1517 1518 wlock.release()
1518 1519
1519 1520 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1520 1521 # hack for command that use a temporary commit (eg: histedit)
1521 1522 # temporary commit got stripped before hook release
1522 1523 if node in self:
1523 1524 self.hook("commit", node=node, parent1=parent1,
1524 1525 parent2=parent2)
1525 1526 self._afterlock(commithook)
1526 1527 return ret
1527 1528
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is True, IOErrors raised while committing individual
        files are fatal even when errno is ENOENT.

        Returns the node of the new changeset.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1614 1615
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear. Flush it to disk now instead.
        if '_phasecache' in vars(self):
            self._phasecache.write()
1632 1633
1633 1634 @unfilteredmethod
1634 1635 def destroyed(self):
1635 1636 '''Inform the repository that nodes have been destroyed.
1636 1637 Intended for use by strip and rollback, so there's a common
1637 1638 place for anything that has to be done after destroying history.
1638 1639 '''
1639 1640 # When one tries to:
1640 1641 # 1) destroy nodes thus calling this method (e.g. strip)
1641 1642 # 2) use phasecache somewhere (e.g. commit)
1642 1643 #
1643 1644 # then 2) will fail because the phasecache contains nodes that were
1644 1645 # removed. We can either remove phasecache from the filecache,
1645 1646 # causing it to reload next time it is accessed, or simply filter
1646 1647 # the removed nodes now and write the updated cache.
1647 1648 self._phasecache.filterunknown(self)
1648 1649 self._phasecache.write()
1649 1650
1650 1651 # update the 'served' branch cache to help read only server process
1651 1652 # Thanks to branchcache collaboration this is done from the nearest
1652 1653 # filtered subset and it is expected to be fast.
1653 1654 branchmap.updatecache(self.filtered('served'))
1654 1655
1655 1656 # Ensure the persistent tag cache is updated. Doing it now
1656 1657 # means that the tag cache only has to worry about destroyed
1657 1658 # heads immediately after a strip/rollback. That in turn
1658 1659 # guarantees that "cachetip == currenttip" (comparing both rev
1659 1660 # and node) always means no nodes have been added or destroyed.
1660 1661
1661 1662 # XXX this is suboptimal when qrefresh'ing: we strip the current
1662 1663 # head, refresh the tag cache, then immediately add a new head.
1663 1664 # But I think doing it this way is necessary for the "instant
1664 1665 # tag cache retrieval" case to work.
1665 1666 self.invalidate()
1666 1667
1667 1668 def walk(self, match, node=None):
1668 1669 '''
1669 1670 walk recursively through the directory tree or a given
1670 1671 changeset, finding all files matched by the match
1671 1672 function
1672 1673 '''
1673 1674 return self[node].walk(match)
1674 1675
1675 1676 def status(self, node1='.', node2=None, match=None,
1676 1677 ignored=False, clean=False, unknown=False,
1677 1678 listsubrepos=False):
1678 1679 '''a convenience method that calls node1.status(node2)'''
1679 1680 return self[node1].status(node2, match, ignored, clean, unknown,
1680 1681 listsubrepos)
1681 1682
1682 1683 def heads(self, start=None):
1683 1684 heads = self.changelog.heads(start)
1684 1685 # sort the output in rev descending order
1685 1686 return sorted(heads, key=self.changelog.rev, reverse=True)
1686 1687
1687 1688 def branchheads(self, branch=None, start=None, closed=False):
1688 1689 '''return a (possibly filtered) list of heads for the given branch
1689 1690
1690 1691 Heads are returned in topological order, from newest to oldest.
1691 1692 If branch is None, use the dirstate branch.
1692 1693 If start is not None, return only heads reachable from start.
1693 1694 If closed is True, return heads that are marked as closed as well.
1694 1695 '''
1695 1696 if branch is None:
1696 1697 branch = self[None].branch()
1697 1698 branches = self.branchmap()
1698 1699 if branch not in branches:
1699 1700 return []
1700 1701 # the cache returns heads ordered lowest to highest
1701 1702 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1702 1703 if start is not None:
1703 1704 # filter out the heads that cannot be reached from startrev
1704 1705 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1705 1706 bheads = [h for h in bheads if h in fbheads]
1706 1707 return bheads
1707 1708
1708 1709 def branches(self, nodes):
1709 1710 if not nodes:
1710 1711 nodes = [self.changelog.tip()]
1711 1712 b = []
1712 1713 for n in nodes:
1713 1714 t = n
1714 1715 while True:
1715 1716 p = self.changelog.parents(n)
1716 1717 if p[1] != nullid or p[0] == nullid:
1717 1718 b.append((t, n, p[0], p[1]))
1718 1719 break
1719 1720 n = p[0]
1720 1721 return b
1721 1722
1722 1723 def between(self, pairs):
1723 1724 r = []
1724 1725
1725 1726 for top, bottom in pairs:
1726 1727 n, l, i = top, [], 0
1727 1728 f = 1
1728 1729
1729 1730 while n != bottom and n != nullid:
1730 1731 p = self.changelog.parents(n)[0]
1731 1732 if i == f:
1732 1733 l.append(n)
1733 1734 f = f * 2
1734 1735 n = p
1735 1736 i += 1
1736 1737
1737 1738 r.append(l)
1738 1739
1739 1740 return r
1740 1741
1741 1742 def checkpush(self, pushop):
1742 1743 """Extensions can override this function if additional checks have
1743 1744 to be performed before pushing, or call it if they override push
1744 1745 command.
1745 1746 """
1746 1747 pass
1747 1748
1748 1749 @unfilteredpropertycache
1749 1750 def prepushoutgoinghooks(self):
1750 1751 """Return util.hooks consists of "(repo, remote, outgoing)"
1751 1752 functions, which are called before pushing changesets.
1752 1753 """
1753 1754 return util.hooks()
1754 1755
    def stream_in(self, remote, remotereqs):
        """Clone the store by streaming raw files from ``remote``.

        remote: the peer to stream from (must support stream_out)
        remotereqs: set of format requirements of the remote repository;
            they replace our format requirements once the copy succeeds.

        Returns len(self.heads()) + 1, matching the changegroup-style
        "number of heads" result expected by callers.
        Raises util.Abort on a server-side refusal and
        error.ResponseError on a malformed stream.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # first line of the stream is a status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<file count> <total byte size>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                # each entry: a "<name>\0<size>" header line followed by
                # exactly <size> bytes of raw file content
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid division by zero in the rate computation below
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    format-related requirements from the
            #                    streamed-in repository
            self.requirements = remotereqs | (
                    self.requirements - self.supportedformats)
            self._applyopenerreqs()
            self._writerequirements()

            if rbranchmap:
                # rebuild the branch cache from the branchmap fetched
                # from the remote before the stream started
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1865 1866
1866 1867 def clone(self, remote, heads=[], stream=None):
1867 1868 '''clone remote repository.
1868 1869
1869 1870 keyword arguments:
1870 1871 heads: list of revs to clone (forces use of pull)
1871 1872 stream: use streaming clone if possible'''
1872 1873
1873 1874 # now, all clients that can request uncompressed clones can
1874 1875 # read repo formats supported by all servers that can serve
1875 1876 # them.
1876 1877
1877 1878 # if revlog format changes, client will have to check version
1878 1879 # and format flags on "stream" capability, and use
1879 1880 # uncompressed only if compatible.
1880 1881
1881 1882 if stream is None:
1882 1883 # if the server explicitly prefers to stream (for fast LANs)
1883 1884 stream = remote.capable('stream-preferred')
1884 1885
1885 1886 if stream and not heads:
1886 1887 # 'stream' means remote revlog format is revlogv1 only
1887 1888 if remote.capable('stream'):
1888 1889 self.stream_in(remote, set(('revlogv1',)))
1889 1890 else:
1890 1891 # otherwise, 'streamreqs' contains the remote revlog format
1891 1892 streamreqs = remote.capable('streamreqs')
1892 1893 if streamreqs:
1893 1894 streamreqs = set(streamreqs.split(','))
1894 1895 # if we support it, stream in and adjust our requirements
1895 1896 if not streamreqs - self.supportedformats:
1896 1897 self.stream_in(remote, streamreqs)
1897 1898
1898 1899 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1899 1900 try:
1900 1901 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1901 1902 ret = exchange.pull(self, remote, heads).cgresult
1902 1903 finally:
1903 1904 self.ui.restoreconfig(quiet)
1904 1905 return ret
1905 1906
1906 1907 def pushkey(self, namespace, key, old, new):
1907 1908 try:
1908 1909 tr = self.currenttransaction()
1909 1910 hookargs = {}
1910 1911 if tr is not None:
1911 1912 hookargs.update(tr.hookargs)
1912 1913 pending = lambda: tr.writepending() and self.root or ""
1913 1914 hookargs['pending'] = pending
1914 1915 hookargs['namespace'] = namespace
1915 1916 hookargs['key'] = key
1916 1917 hookargs['old'] = old
1917 1918 hookargs['new'] = new
1918 1919 self.hook('prepushkey', throw=True, **hookargs)
1919 1920 except error.HookAbort, exc:
1920 1921 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1921 1922 if exc.hint:
1922 1923 self.ui.write_err(_("(%s)\n") % exc.hint)
1923 1924 return False
1924 1925 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1925 1926 ret = pushkey.push(self, namespace, key, old, new)
1926 1927 def runhook():
1927 1928 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1928 1929 ret=ret)
1929 1930 self._afterlock(runhook)
1930 1931 return ret
1931 1932
1932 1933 def listkeys(self, namespace):
1933 1934 self.hook('prelistkeys', throw=True, namespace=namespace)
1934 1935 self.ui.debug('listing keys for "%s"\n' % namespace)
1935 1936 values = pushkey.list(self, namespace)
1936 1937 self.hook('listkeys', namespace=namespace, values=values)
1937 1938 return values
1938 1939
1939 1940 def debugwireargs(self, one, two, three=None, four=None, five=None):
1940 1941 '''used to test argument passing over the wire'''
1941 1942 return "%s %s %s %s %s" % (one, two, three, four, five)
1942 1943
1943 1944 def savecommitmessage(self, text):
1944 1945 fp = self.vfs('last-message.txt', 'wb')
1945 1946 try:
1946 1947 fp.write(text)
1947 1948 finally:
1948 1949 fp.close()
1949 1950 return self.pathto(fp.name[len(self.root) + 1:])
1950 1951
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (vfs, src, dest) renames.

    ``files`` is snapshotted into plain tuples so the callback holds no
    reference back to the transaction.  A rename whose source is missing
    (e.g. a journal file that was never created) is silently skipped.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return renameall
1961 1962
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1966 1967
def instance(ui, path, create):
    """Instantiate a localrepository for the given local path."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1969 1970
def islocal(path):
    """A localrepository path is local by definition."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now