localrepo: rename requirements parameter in stream_in()...
Drew Gottlieb
r24917:71a738a6 default
@@ -1,1971 +1,1971 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on a repo is done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that applies to the unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering into account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate a method that always needs to be run on the unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
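# Illustrative sketch (annotation, not part of upstream localrepo.py):
# the decorator is applied like any other, and the wrapped method then
# always sees the unfiltered repo as 'self', e.g.:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...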
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception, exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced, exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 197 'dotencode'))
198 198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 199 filtername = None
200 200
201 201 # a list of (ui, featureset) functions.
202 202 # only functions defined in modules of enabled extensions are invoked
203 203 featuresetupfuncs = set()
204 204
205 205 def _baserequirements(self, create):
206 206 return ['revlogv1']
207 207
208 208 def __init__(self, baseui, path=None, create=False):
209 209 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 210 self.wopener = self.wvfs
211 211 self.root = self.wvfs.base
212 212 self.path = self.wvfs.join(".hg")
213 213 self.origroot = path
214 214 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 215 self.vfs = scmutil.vfs(self.path)
216 216 self.opener = self.vfs
217 217 self.baseui = baseui
218 218 self.ui = baseui.copy()
219 219 self.ui.copy = baseui.copy # prevent copying repo configuration
220 220 # A list of callbacks to shape the phase if no data were found.
221 221 # Callbacks are in the form: func(repo, roots) --> processed root.
222 222 # This list is to be filled by extensions during repo setup
223 223 self._phasedefaults = []
224 224 try:
225 225 self.ui.readconfig(self.join("hgrc"), self.root)
226 226 extensions.loadall(self.ui)
227 227 except IOError:
228 228 pass
229 229
230 230 if self.featuresetupfuncs:
231 231 self.supported = set(self._basesupported) # use private copy
232 232 extmods = set(m.__name__ for n, m
233 233 in extensions.extensions(self.ui))
234 234 for setupfunc in self.featuresetupfuncs:
235 235 if setupfunc.__module__ in extmods:
236 236 setupfunc(self.ui, self.supported)
237 237 else:
238 238 self.supported = self._basesupported
239 239
240 240 if not self.vfs.isdir():
241 241 if create:
242 242 if not self.wvfs.exists():
243 243 self.wvfs.makedirs()
244 244 self.vfs.makedir(notindexed=True)
245 245 requirements = set(self._baserequirements(create))
246 246 if self.ui.configbool('format', 'usestore', True):
247 247 self.vfs.mkdir("store")
248 248 requirements.add("store")
249 249 if self.ui.configbool('format', 'usefncache', True):
250 250 requirements.add("fncache")
251 251 if self.ui.configbool('format', 'dotencode', True):
252 252 requirements.add('dotencode')
253 253 # create an invalid changelog
254 254 self.vfs.append(
255 255 "00changelog.i",
256 256 '\0\0\0\2' # represents revlogv2
257 257 ' dummy changelog to prevent using the old repo layout'
258 258 )
259 259 if self.ui.configbool('format', 'generaldelta', False):
260 260 requirements.add("generaldelta")
261 261 if self.ui.configbool('experimental', 'manifestv2', False):
262 262 requirements.add("manifestv2")
263 263 else:
264 264 raise error.RepoError(_("repository %s not found") % path)
265 265 elif create:
266 266 raise error.RepoError(_("repository %s already exists") % path)
267 267 else:
268 268 try:
269 269 requirements = scmutil.readrequires(self.vfs, self.supported)
270 270 except IOError, inst:
271 271 if inst.errno != errno.ENOENT:
272 272 raise
273 273 requirements = set()
274 274
275 275 self.sharedpath = self.path
276 276 try:
277 277 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
278 278 realpath=True)
279 279 s = vfs.base
280 280 if not vfs.exists():
281 281 raise error.RepoError(
282 282 _('.hg/sharedpath points to nonexistent directory %s') % s)
283 283 self.sharedpath = s
284 284 except IOError, inst:
285 285 if inst.errno != errno.ENOENT:
286 286 raise
287 287
288 288 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
289 289 self.spath = self.store.path
290 290 self.svfs = self.store.vfs
291 291 self.sopener = self.svfs
292 292 self.sjoin = self.store.join
293 293 self.vfs.createmode = self.store.createmode
294 294 self.requirements = requirements
295 295 self._applyopenerreqs()
296 296 if create:
297 297 self._writerequirements()
298 298
299 299
300 300 self._branchcaches = {}
301 301 self._revbranchcache = None
302 302 self.filterpats = {}
303 303 self._datafilters = {}
304 304 self._transref = self._lockref = self._wlockref = None
305 305
306 306 # A cache for various files under .hg/ that tracks file changes,
307 307 # (used by the filecache decorator)
308 308 #
309 309 # Maps a property name to its util.filecacheentry
310 310 self._filecache = {}
311 311
312 312 # hold sets of revisions to be filtered
313 313 # should be cleared when something might have changed the filter value:
314 314 # - new changesets,
315 315 # - phase change,
316 316 # - new obsolescence marker,
317 317 # - working directory parent change,
318 318 # - bookmark changes
319 319 self.filteredrevcache = {}
320 320
321 321 # generic mapping between names and nodes
322 322 self.names = namespaces.namespaces()
323 323
324 324 def close(self):
325 325 self._writecaches()
326 326
327 327 def _writecaches(self):
328 328 if self._revbranchcache:
329 329 self._revbranchcache.write()
330 330
331 331 def _restrictcapabilities(self, caps):
332 332 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 333 caps = set(caps)
334 334 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 335 caps.add('bundle2=' + urllib.quote(capsblob))
336 336 return caps
337 337
338 338 def _applyopenerreqs(self):
339 339 self.svfs.options = dict((r, 1) for r in self.requirements
340 340 if r in self.openerreqs)
341 341 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
342 342 if chunkcachesize is not None:
343 343 self.svfs.options['chunkcachesize'] = chunkcachesize
344 344 maxchainlen = self.ui.configint('format', 'maxchainlen')
345 345 if maxchainlen is not None:
346 346 self.svfs.options['maxchainlen'] = maxchainlen
347 347 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
348 348 if manifestcachesize is not None:
349 349 self.svfs.options['manifestcachesize'] = manifestcachesize
350 350 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
351 351 if usetreemanifest is not None:
352 352 self.svfs.options['usetreemanifest'] = usetreemanifest
353 353
354 354 def _writerequirements(self):
355 355 reqfile = self.vfs("requires", "w")
356 356 for r in sorted(self.requirements):
357 357 reqfile.write("%s\n" % r)
358 358 reqfile.close()
359 359
360 360 def _checknested(self, path):
361 361 """Determine if path is a legal nested repository."""
362 362 if not path.startswith(self.root):
363 363 return False
364 364 subpath = path[len(self.root) + 1:]
365 365 normsubpath = util.pconvert(subpath)
366 366
367 367 # XXX: Checking against the current working copy is wrong in
368 368 # the sense that it can reject things like
369 369 #
370 370 # $ hg cat -r 10 sub/x.txt
371 371 #
372 372 # if sub/ is no longer a subrepository in the working copy
373 373 # parent revision.
374 374 #
375 375 # However, it can of course also allow things that would have
376 376 # been rejected before, such as the above cat command if sub/
377 377 # is a subrepository now, but was a normal directory before.
378 378 # The old path auditor would have rejected by mistake since it
379 379 # panics when it sees sub/.hg/.
380 380 #
381 381 # All in all, checking against the working copy seems sensible
382 382 # since we want to prevent access to nested repositories on
383 383 # the filesystem *now*.
384 384 ctx = self[None]
385 385 parts = util.splitpath(subpath)
386 386 while parts:
387 387 prefix = '/'.join(parts)
388 388 if prefix in ctx.substate:
389 389 if prefix == normsubpath:
390 390 return True
391 391 else:
392 392 sub = ctx.sub(prefix)
393 393 return sub.checknested(subpath[len(prefix) + 1:])
394 394 else:
395 395 parts.pop()
396 396 return False
397 397
398 398 def peer(self):
399 399 return localpeer(self) # not cached to avoid reference cycle
400 400
401 401 def unfiltered(self):
402 402 """Return unfiltered version of the repository
403 403
404 404 Intended to be overwritten by filtered repo."""
405 405 return self
406 406
407 407 def filtered(self, name):
408 408 """Return a filtered version of a repository"""
409 409 # build a new class with the mixin and the current class
410 410 # (possibly subclass of the repo)
411 411 class proxycls(repoview.repoview, self.unfiltered().__class__):
412 412 pass
413 413 return proxycls(self, name)
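# Illustrative sketch (annotation, not upstream code): 'name' is a repoview
# filter name such as 'visible' or 'served', e.g.:
#
#     served = repo.filtered('served')  # hides secret and hidden changesets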
414 414
415 415 @repofilecache('bookmarks')
416 416 def _bookmarks(self):
417 417 return bookmarks.bmstore(self)
418 418
419 419 @repofilecache('bookmarks.current')
420 420 def _bookmarkcurrent(self):
421 421 return bookmarks.readcurrent(self)
422 422
423 423 def bookmarkheads(self, bookmark):
424 424 name = bookmark.split('@', 1)[0]
425 425 heads = []
426 426 for mark, n in self._bookmarks.iteritems():
427 427 if mark.split('@', 1)[0] == name:
428 428 heads.append(n)
429 429 return heads
430 430
431 431 @storecache('phaseroots')
432 432 def _phasecache(self):
433 433 return phases.phasecache(self, self._phasedefaults)
434 434
435 435 @storecache('obsstore')
436 436 def obsstore(self):
437 437 # read default format for new obsstore.
438 438 defaultformat = self.ui.configint('format', 'obsstore-version', None)
439 439 # rely on obsstore class default when possible.
440 440 kwargs = {}
441 441 if defaultformat is not None:
442 442 kwargs['defaultformat'] = defaultformat
443 443 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
444 444 store = obsolete.obsstore(self.svfs, readonly=readonly,
445 445 **kwargs)
446 446 if store and readonly:
447 447 self.ui.warn(
448 448 _('obsolete feature not enabled but %i markers found!\n')
449 449 % len(list(store)))
450 450 return store
451 451
452 452 @storecache('00changelog.i')
453 453 def changelog(self):
454 454 c = changelog.changelog(self.svfs)
455 455 if 'HG_PENDING' in os.environ:
456 456 p = os.environ['HG_PENDING']
457 457 if p.startswith(self.root):
458 458 c.readpending('00changelog.i.a')
459 459 return c
460 460
461 461 @storecache('00manifest.i')
462 462 def manifest(self):
463 463 return manifest.manifest(self.svfs)
464 464
465 465 @repofilecache('dirstate')
466 466 def dirstate(self):
467 467 warned = [0]
468 468 def validate(node):
469 469 try:
470 470 self.changelog.rev(node)
471 471 return node
472 472 except error.LookupError:
473 473 if not warned[0]:
474 474 warned[0] = True
475 475 self.ui.warn(_("warning: ignoring unknown"
476 476 " working parent %s!\n") % short(node))
477 477 return nullid
478 478
479 479 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
480 480
481 481 def __getitem__(self, changeid):
482 482 if changeid is None:
483 483 return context.workingctx(self)
484 484 if isinstance(changeid, slice):
485 485 return [context.changectx(self, i)
486 486 for i in xrange(*changeid.indices(len(self)))
487 487 if i not in self.changelog.filteredrevs]
488 488 return context.changectx(self, changeid)
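# Illustrative sketch (annotation, not upstream code): __getitem__ accepts
# several changeid forms, e.g.:
#
#     repo[None]     # working directory context
#     repo['tip']    # symbolic name, tag, bookmark, hash, or rev number
#     repo[0:5]      # slice -> list of changectxs, filtered revs skipped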
489 489
490 490 def __contains__(self, changeid):
491 491 try:
492 492 self[changeid]
493 493 return True
494 494 except error.RepoLookupError:
495 495 return False
496 496
497 497 def __nonzero__(self):
498 498 return True
499 499
500 500 def __len__(self):
501 501 return len(self.changelog)
502 502
503 503 def __iter__(self):
504 504 return iter(self.changelog)
505 505
506 506 def revs(self, expr, *args):
507 507 '''Return a list of revisions matching the given revset'''
508 508 expr = revset.formatspec(expr, *args)
509 509 m = revset.match(None, expr)
510 510 return m(self)
511 511
512 512 def set(self, expr, *args):
513 513 '''
514 514 Yield a context for each matching revision, after doing arg
515 515 replacement via revset.formatspec
516 516 '''
517 517 for r in self.revs(expr, *args):
518 518 yield self[r]
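# Illustrative sketch (annotation, not upstream code): revset.formatspec
# quotes each argument by type, e.g. (variable names assumed):
#
#     repo.revs('heads(%s)', branchname)        # %s -> quoted string
#     for ctx in repo.set('%ln::', somenodes):  # %ln -> list of nodes
#         pass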
519 519
520 520 def url(self):
521 521 return 'file:' + self.root
522 522
523 523 def hook(self, name, throw=False, **args):
524 524 """Call a hook, passing this repo instance.
525 525
526 526 This is a convenience method to aid invoking hooks. Extensions likely
527 527 won't call this unless they have registered a custom hook or are
528 528 replacing code that is expected to call a hook.
529 529 """
530 530 return hook.hook(self.ui, self, name, throw, **args)
531 531
532 532 @unfilteredmethod
533 533 def _tag(self, names, node, message, local, user, date, extra={},
534 534 editor=False):
535 535 if isinstance(names, str):
536 536 names = (names,)
537 537
538 538 branches = self.branchmap()
539 539 for name in names:
540 540 self.hook('pretag', throw=True, node=hex(node), tag=name,
541 541 local=local)
542 542 if name in branches:
543 543 self.ui.warn(_("warning: tag %s conflicts with existing"
544 544 " branch name\n") % name)
545 545
546 546 def writetags(fp, names, munge, prevtags):
547 547 fp.seek(0, 2)
548 548 if prevtags and prevtags[-1] != '\n':
549 549 fp.write('\n')
550 550 for name in names:
551 551 if munge:
552 552 m = munge(name)
553 553 else:
554 554 m = name
555 555
556 556 if (self._tagscache.tagtypes and
557 557 name in self._tagscache.tagtypes):
558 558 old = self.tags().get(name, nullid)
559 559 fp.write('%s %s\n' % (hex(old), m))
560 560 fp.write('%s %s\n' % (hex(node), m))
561 561 fp.close()
562 562
563 563 prevtags = ''
564 564 if local:
565 565 try:
566 566 fp = self.vfs('localtags', 'r+')
567 567 except IOError:
568 568 fp = self.vfs('localtags', 'a')
569 569 else:
570 570 prevtags = fp.read()
571 571
572 572 # local tags are stored in the current charset
573 573 writetags(fp, names, None, prevtags)
574 574 for name in names:
575 575 self.hook('tag', node=hex(node), tag=name, local=local)
576 576 return
577 577
578 578 try:
579 579 fp = self.wfile('.hgtags', 'rb+')
580 580 except IOError, e:
581 581 if e.errno != errno.ENOENT:
582 582 raise
583 583 fp = self.wfile('.hgtags', 'ab')
584 584 else:
585 585 prevtags = fp.read()
586 586
587 587 # committed tags are stored in UTF-8
588 588 writetags(fp, names, encoding.fromlocal, prevtags)
589 589
590 590 fp.close()
591 591
592 592 self.invalidatecaches()
593 593
594 594 if '.hgtags' not in self.dirstate:
595 595 self[None].add(['.hgtags'])
596 596
597 597 m = matchmod.exact(self.root, '', ['.hgtags'])
598 598 tagnode = self.commit(message, user, date, extra=extra, match=m,
599 599 editor=editor)
600 600
601 601 for name in names:
602 602 self.hook('tag', node=hex(node), tag=name, local=local)
603 603
604 604 return tagnode
605 605
606 606 def tag(self, names, node, message, local, user, date, editor=False):
607 607 '''tag a revision with one or more symbolic names.
608 608
609 609 names is a list of strings or, when adding a single tag, names may be a
610 610 string.
611 611
612 612 if local is True, the tags are stored in a per-repository file.
613 613 otherwise, they are stored in the .hgtags file, and a new
614 614 changeset is committed with the change.
615 615
616 616 keyword arguments:
617 617
618 618 local: whether to store tags in non-version-controlled file
619 619 (default False)
620 620
621 621 message: commit message to use if committing
622 622
623 623 user: name of user to use if committing
624 624
625 625 date: date tuple to use if committing'''
626 626
627 627 if not local:
628 628 m = matchmod.exact(self.root, '', ['.hgtags'])
629 629 if util.any(self.status(match=m, unknown=True, ignored=True)):
630 630 raise util.Abort(_('working copy of .hgtags is changed'),
631 631 hint=_('please commit .hgtags manually'))
632 632
633 633 self.tags() # instantiate the cache
634 634 self._tag(names, node, message, local, user, date, editor=editor)
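# Illustrative sketch (annotation, not upstream code): per the docstring,
# names may be a single string, e.g. (node assumed to be a binary node id):
#
#     repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)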
635 635
636 636 @filteredpropertycache
637 637 def _tagscache(self):
638 638 '''Returns a tagscache object that contains various tag-related
639 639 caches.'''
640 640
641 641 # This simplifies its cache management by having one decorated
642 642 # function (this one) and the rest simply fetch things from it.
643 643 class tagscache(object):
644 644 def __init__(self):
645 645 # These two define the set of tags for this repository. tags
646 646 # maps tag name to node; tagtypes maps tag name to 'global' or
647 647 # 'local'. (Global tags are defined by .hgtags across all
648 648 # heads, and local tags are defined in .hg/localtags.)
649 649 # They constitute the in-memory cache of tags.
650 650 self.tags = self.tagtypes = None
651 651
652 652 self.nodetagscache = self.tagslist = None
653 653
654 654 cache = tagscache()
655 655 cache.tags, cache.tagtypes = self._findtags()
656 656
657 657 return cache
658 658
659 659 def tags(self):
660 660 '''return a mapping of tag to node'''
661 661 t = {}
662 662 if self.changelog.filteredrevs:
663 663 tags, tt = self._findtags()
664 664 else:
665 665 tags = self._tagscache.tags
666 666 for k, v in tags.iteritems():
667 667 try:
668 668 # ignore tags to unknown nodes
669 669 self.changelog.rev(v)
670 670 t[k] = v
671 671 except (error.LookupError, ValueError):
672 672 pass
673 673 return t
674 674
675 675 def _findtags(self):
676 676 '''Do the hard work of finding tags. Return a pair of dicts
677 677 (tags, tagtypes) where tags maps tag name to node, and tagtypes
678 678 maps tag name to a string like \'global\' or \'local\'.
679 679 Subclasses or extensions are free to add their own tags, but
680 680 should be aware that the returned dicts will be retained for the
681 681 duration of the localrepo object.'''
682 682
683 683 # XXX what tagtype should subclasses/extensions use? Currently
684 684 # mq and bookmarks add tags, but do not set the tagtype at all.
685 685 # Should each extension invent its own tag type? Should there
686 686 # be one tagtype for all such "virtual" tags? Or is the status
687 687 # quo fine?
688 688
689 689 alltags = {} # map tag name to (node, hist)
690 690 tagtypes = {}
691 691
692 692 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
693 693 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
694 694
695 695 # Build the return dicts. Have to re-encode tag names because
696 696 # the tags module always uses UTF-8 (in order not to lose info
697 697 # writing to the cache), but the rest of Mercurial wants them in
698 698 # local encoding.
699 699 tags = {}
700 700 for (name, (node, hist)) in alltags.iteritems():
701 701 if node != nullid:
702 702 tags[encoding.tolocal(name)] = node
703 703 tags['tip'] = self.changelog.tip()
704 704 tagtypes = dict([(encoding.tolocal(name), value)
705 705 for (name, value) in tagtypes.iteritems()])
706 706 return (tags, tagtypes)
707 707
708 708 def tagtype(self, tagname):
709 709 '''
710 710 return the type of the given tag. result can be:
711 711
712 712 'local' : a local tag
713 713 'global' : a global tag
714 714 None : tag does not exist
715 715 '''
716 716
717 717 return self._tagscache.tagtypes.get(tagname)
718 718
719 719 def tagslist(self):
720 720 '''return a list of tags ordered by revision'''
721 721 if not self._tagscache.tagslist:
722 722 l = []
723 723 for t, n in self.tags().iteritems():
724 724 l.append((self.changelog.rev(n), t, n))
725 725 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
726 726
727 727 return self._tagscache.tagslist
728 728
729 729 def nodetags(self, node):
730 730 '''return the tags associated with a node'''
731 731 if not self._tagscache.nodetagscache:
732 732 nodetagscache = {}
733 733 for t, n in self._tagscache.tags.iteritems():
734 734 nodetagscache.setdefault(n, []).append(t)
735 735 for tags in nodetagscache.itervalues():
736 736 tags.sort()
737 737 self._tagscache.nodetagscache = nodetagscache
738 738 return self._tagscache.nodetagscache.get(node, [])
739 739
740 740 def nodebookmarks(self, node):
741 741 marks = []
742 742 for bookmark, n in self._bookmarks.iteritems():
743 743 if n == node:
744 744 marks.append(bookmark)
745 745 return sorted(marks)
746 746
747 747 def branchmap(self):
748 748 '''returns a dictionary {branch: [branchheads]} with branchheads
749 749 ordered by increasing revision number'''
750 750 branchmap.updatecache(self)
751 751 return self._branchcaches[self.filtername]
752 752
753 753 @unfilteredmethod
754 754 def revbranchcache(self):
755 755 if not self._revbranchcache:
756 756 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
757 757 return self._revbranchcache
758 758
759 759 def branchtip(self, branch, ignoremissing=False):
760 760 '''return the tip node for a given branch
761 761
762 762 If ignoremissing is True, then this method will not raise an error.
763 763 This is helpful for callers that only expect None for a missing branch
764 764 (e.g. namespace).
765 765
766 766 '''
767 767 try:
768 768 return self.branchmap().branchtip(branch)
769 769 except KeyError:
770 770 if not ignoremissing:
771 771 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
772 772 else:
773 773 pass
774 774
775 775 def lookup(self, key):
776 776 return self[key].node()
777 777
778 778 def lookupbranch(self, key, remote=None):
779 779 repo = remote or self
780 780 if key in repo.branchmap():
781 781 return key
782 782
783 783 repo = (remote and remote.local()) and remote or self
784 784 return repo[key].branch()
785 785
786 786 def known(self, nodes):
787 787 nm = self.changelog.nodemap
788 788 pc = self._phasecache
789 789 result = []
790 790 for n in nodes:
791 791 r = nm.get(n)
792 792 resp = not (r is None or pc.phase(self, r) >= phases.secret)
793 793 result.append(resp)
794 794 return result
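# Illustrative sketch (annotation, not upstream code): known() backs the
# discovery protocol; it maps nodes to booleans, e.g.:
#
#     repo.known([node1, node2])  # -> e.g. [True, False]; secret-phase
#                                 # revisions are reported as unknown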
795 795
796 796 def local(self):
797 797 return self
798 798
799 799 def cancopy(self):
800 800 # so statichttprepo's override of local() works
801 801 if not self.local():
802 802 return False
803 803 if not self.ui.configbool('phases', 'publish', True):
804 804 return True
805 805 # if publishing we can't copy if there is filtered content
806 806 return not self.filtered('visible').changelog.filteredrevs
807 807
808 808 def shared(self):
809 809 '''the type of shared repository (None if not shared)'''
810 810 if self.sharedpath != self.path:
811 811 return 'store'
812 812 return None
813 813
814 814 def join(self, f, *insidef):
815 815 return self.vfs.join(os.path.join(f, *insidef))
816 816
817 817 def wjoin(self, f, *insidef):
818 818 return self.vfs.reljoin(self.root, f, *insidef)
819 819
820 820 def file(self, f):
821 821 if f[0] == '/':
822 822 f = f[1:]
823 823 return filelog.filelog(self.svfs, f)
824 824
825 825 def changectx(self, changeid):
826 826 return self[changeid]
827 827
828 828 def parents(self, changeid=None):
829 829 '''get list of changectxs for parents of changeid'''
830 830 return self[changeid].parents()
831 831
832 832 def setparents(self, p1, p2=nullid):
833 833 self.dirstate.beginparentchange()
834 834 copies = self.dirstate.setparents(p1, p2)
835 835 pctx = self[p1]
836 836 if copies:
837 837 # Adjust copy records; the dirstate cannot do it, as it
838 838 # requires access to the parents' manifests. Preserve them
839 839 # only for entries added to the first parent.
840 840 for f in copies:
841 841 if f not in pctx and copies[f] in pctx:
842 842 self.dirstate.copy(copies[f], f)
843 843 if p2 == nullid:
844 844 for f, s in sorted(self.dirstate.copies().items()):
845 845 if f not in pctx and s not in pctx:
846 846 self.dirstate.copy(None, f)
847 847 self.dirstate.endparentchange()
848 848
849 849 def filectx(self, path, changeid=None, fileid=None):
850 850 """changeid can be a changeset revision, node, or tag.
851 851 fileid can be a file revision or node."""
852 852 return context.filectx(self, path, changeid, fileid)
853 853
854 854 def getcwd(self):
855 855 return self.dirstate.getcwd()
856 856
857 857 def pathto(self, f, cwd=None):
858 858 return self.dirstate.pathto(f, cwd)
859 859
860 860 def wfile(self, f, mode='r'):
861 861 return self.wvfs(f, mode)
862 862
863 863 def _link(self, f):
864 864 return self.wvfs.islink(f)
865 865
866 866 def _loadfilter(self, filter):
867 867 if filter not in self.filterpats:
868 868 l = []
869 869 for pat, cmd in self.ui.configitems(filter):
870 870 if cmd == '!':
871 871 continue
872 872 mf = matchmod.match(self.root, '', [pat])
873 873 fn = None
874 874 params = cmd
875 875 for name, filterfn in self._datafilters.iteritems():
876 876 if cmd.startswith(name):
877 877 fn = filterfn
878 878 params = cmd[len(name):].lstrip()
879 879 break
880 880 if not fn:
881 881 fn = lambda s, c, **kwargs: util.filter(s, c)
882 882 # Wrap old filters not supporting keyword arguments
883 883 if not inspect.getargspec(fn)[2]:
884 884 oldfn = fn
885 885 fn = lambda s, c, **kwargs: oldfn(s, c)
886 886 l.append((mf, fn, params))
887 887 self.filterpats[filter] = l
888 888 return self.filterpats[filter]
889 889
890 890 def _filter(self, filterpats, filename, data):
891 891 for mf, fn, cmd in filterpats:
892 892 if mf(filename):
893 893 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
894 894 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
895 895 break
896 896
897 897 return data
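# Illustrative sketch (annotation, not upstream code): filter patterns come
# from hgrc sections of the same name, mapping a file pattern to a command
# or registered data filter, e.g.:
#
#     [encode]
#     **.txt = tr -d '\r'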
898 898
899 899 @unfilteredpropertycache
900 900 def _encodefilterpats(self):
901 901 return self._loadfilter('encode')
902 902
903 903 @unfilteredpropertycache
904 904 def _decodefilterpats(self):
905 905 return self._loadfilter('decode')
906 906
907 907 def adddatafilter(self, name, filter):
908 908 self._datafilters[name] = filter
909 909
910 910 def wread(self, filename):
911 911 if self._link(filename):
912 912 data = self.wvfs.readlink(filename)
913 913 else:
914 914 data = self.wvfs.read(filename)
915 915 return self._filter(self._encodefilterpats, filename, data)
916 916
917 917 def wwrite(self, filename, data, flags):
918 918 """write ``data`` into ``filename`` in the working directory
919 919
920 920 This returns the length of the written (maybe decoded) data.
921 921 """
922 922 data = self._filter(self._decodefilterpats, filename, data)
923 923 if 'l' in flags:
924 924 self.wvfs.symlink(data, filename)
925 925 else:
926 926 self.wvfs.write(filename, data)
927 927 if 'x' in flags:
928 928 self.wvfs.setflags(filename, False, True)
929 929 return len(data)
930 930
931 931 def wwritedata(self, filename, data):
932 932 return self._filter(self._decodefilterpats, filename, data)
933 933
934 934 def currenttransaction(self):
935 935 """return the current transaction or None if none exists"""
936 936 if self._transref:
937 937 tr = self._transref()
938 938 else:
939 939 tr = None
940 940
941 941 if tr and tr.running():
942 942 return tr
943 943 return None
944 944
945 945 def transaction(self, desc, report=None):
946 946 if (self.ui.configbool('devel', 'all')
947 947 or self.ui.configbool('devel', 'check-locks')):
948 948 l = self._lockref and self._lockref()
949 949 if l is None or not l.held:
950 950 scmutil.develwarn(self.ui, 'transaction with no lock')
951 951 tr = self.currenttransaction()
952 952 if tr is not None:
953 953 return tr.nest()
954 954
955 955 # abort here if the journal already exists
956 956 if self.svfs.exists("journal"):
957 957 raise error.RepoError(
958 958 _("abandoned transaction found"),
959 959 hint=_("run 'hg recover' to clean up transaction"))
960 960
961 961 self.hook('pretxnopen', throw=True, txnname=desc)
962 962
963 963 self._writejournal(desc)
964 964 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
965 965 if report:
966 966 rp = report
967 967 else:
968 968 rp = self.ui.warn
969 969 vfsmap = {'plain': self.vfs} # root of .hg/
970 970 # we must avoid cyclic reference between repo and transaction.
971 971 reporef = weakref.ref(self)
972 972 def validate(tr):
973 973 """will run pre-closing hooks"""
974 974 pending = lambda: tr.writepending() and self.root or ""
975 975 reporef().hook('pretxnclose', throw=True, pending=pending,
976 976 txnname=desc, **tr.hookargs)
977 977
978 978 tr = transaction.transaction(rp, self.sopener, vfsmap,
979 979 "journal",
980 980 "undo",
981 981 aftertrans(renames),
982 982 self.store.createmode,
983 983 validator=validate)
984 984
985 985 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
986 986 tr.hookargs['TXNID'] = trid
987 987 # note: writing the fncache only during finalize means that the file is
988 988 # outdated when running hooks. As fncache is used for streaming clone,
989 989 # this is not expected to break anything that happens during the hooks.
990 990 tr.addfinalize('flush-fncache', self.store.write)
991 991 def txnclosehook(tr2):
992 992 """To be run if the transaction is successful; will schedule a hook run
993 993 """
994 994 def hook():
995 995 reporef().hook('txnclose', throw=False, txnname=desc,
996 996 **tr2.hookargs)
997 997 reporef()._afterlock(hook)
998 998 tr.addfinalize('txnclose-hook', txnclosehook)
999 999 def txnaborthook(tr2):
1000 1000 """To be run if the transaction is aborted
1001 1001 """
1002 1002 reporef().hook('txnabort', throw=False, txnname=desc,
1003 1003 **tr2.hookargs)
1004 1004 tr.addabort('txnabort-hook', txnaborthook)
1005 1005 self._transref = weakref.ref(tr)
1006 1006 return tr
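# Illustrative sketch (annotation, not upstream code): the usual caller
# pattern, mirroring commitctx() below:
#
#     lock = repo.lock()
#     try:
#         tr = repo.transaction('my-operation')
#         # ... write to the store via tr ...
#         tr.close()    # commit the transaction
#     finally:
#         tr.release()  # aborts if close() was never reached
#         lock.release()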
1007 1007
1008 1008 def _journalfiles(self):
1009 1009 return ((self.svfs, 'journal'),
1010 1010 (self.vfs, 'journal.dirstate'),
1011 1011 (self.vfs, 'journal.branch'),
1012 1012 (self.vfs, 'journal.desc'),
1013 1013 (self.vfs, 'journal.bookmarks'),
1014 1014 (self.svfs, 'journal.phaseroots'))
1015 1015
1016 1016 def undofiles(self):
1017 1017 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1018 1018
1019 1019 def _writejournal(self, desc):
1020 1020 self.vfs.write("journal.dirstate",
1021 1021 self.vfs.tryread("dirstate"))
1022 1022 self.vfs.write("journal.branch",
1023 1023 encoding.fromlocal(self.dirstate.branch()))
1024 1024 self.vfs.write("journal.desc",
1025 1025 "%d\n%s\n" % (len(self), desc))
1026 1026 self.vfs.write("journal.bookmarks",
1027 1027 self.vfs.tryread("bookmarks"))
1028 1028 self.svfs.write("journal.phaseroots",
1029 1029 self.svfs.tryread("phaseroots"))
1030 1030
1031 1031 def recover(self):
1032 1032 lock = self.lock()
1033 1033 try:
1034 1034 if self.svfs.exists("journal"):
1035 1035 self.ui.status(_("rolling back interrupted transaction\n"))
1036 1036 vfsmap = {'': self.svfs,
1037 1037 'plain': self.vfs,}
1038 1038 transaction.rollback(self.svfs, vfsmap, "journal",
1039 1039 self.ui.warn)
1040 1040 self.invalidate()
1041 1041 return True
1042 1042 else:
1043 1043 self.ui.warn(_("no interrupted transaction available\n"))
1044 1044 return False
1045 1045 finally:
1046 1046 lock.release()
1047 1047
1048 1048 def rollback(self, dryrun=False, force=False):
1049 1049 wlock = lock = None
1050 1050 try:
1051 1051 wlock = self.wlock()
1052 1052 lock = self.lock()
1053 1053 if self.svfs.exists("undo"):
1054 1054 return self._rollback(dryrun, force)
1055 1055 else:
1056 1056 self.ui.warn(_("no rollback information available\n"))
1057 1057 return 1
1058 1058 finally:
1059 1059 release(lock, wlock)
1060 1060
1061 1061 @unfilteredmethod # Until we get smarter cache management
1062 1062 def _rollback(self, dryrun, force):
1063 1063 ui = self.ui
1064 1064 try:
1065 1065 args = self.vfs.read('undo.desc').splitlines()
1066 1066 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1067 1067 if len(args) >= 3:
1068 1068 detail = args[2]
1069 1069 oldtip = oldlen - 1
1070 1070
1071 1071 if detail and ui.verbose:
1072 1072 msg = (_('repository tip rolled back to revision %s'
1073 1073 ' (undo %s: %s)\n')
1074 1074 % (oldtip, desc, detail))
1075 1075 else:
1076 1076 msg = (_('repository tip rolled back to revision %s'
1077 1077 ' (undo %s)\n')
1078 1078 % (oldtip, desc))
1079 1079 except IOError:
1080 1080 msg = _('rolling back unknown transaction\n')
1081 1081 desc = None
1082 1082
1083 1083 if not force and self['.'] != self['tip'] and desc == 'commit':
1084 1084 raise util.Abort(
1085 1085 _('rollback of last commit while not checked out '
1086 1086 'may lose data'), hint=_('use -f to force'))
1087 1087
1088 1088 ui.status(msg)
1089 1089 if dryrun:
1090 1090 return 0
1091 1091
1092 1092 parents = self.dirstate.parents()
1093 1093 self.destroying()
1094 1094 vfsmap = {'plain': self.vfs, '': self.svfs}
1095 1095 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1096 1096 if self.vfs.exists('undo.bookmarks'):
1097 1097 self.vfs.rename('undo.bookmarks', 'bookmarks')
1098 1098 if self.svfs.exists('undo.phaseroots'):
1099 1099 self.svfs.rename('undo.phaseroots', 'phaseroots')
1100 1100 self.invalidate()
1101 1101
1102 1102 parentgone = (parents[0] not in self.changelog.nodemap or
1103 1103 parents[1] not in self.changelog.nodemap)
1104 1104 if parentgone:
1105 1105 self.vfs.rename('undo.dirstate', 'dirstate')
1106 1106 try:
1107 1107 branch = self.vfs.read('undo.branch')
1108 1108 self.dirstate.setbranch(encoding.tolocal(branch))
1109 1109 except IOError:
1110 1110 ui.warn(_('named branch could not be reset: '
1111 1111 'current branch is still \'%s\'\n')
1112 1112 % self.dirstate.branch())
1113 1113
1114 1114 self.dirstate.invalidate()
1115 1115 parents = tuple([p.rev() for p in self.parents()])
1116 1116 if len(parents) > 1:
1117 1117 ui.status(_('working directory now based on '
1118 1118 'revisions %d and %d\n') % parents)
1119 1119 else:
1120 1120 ui.status(_('working directory now based on '
1121 1121 'revision %d\n') % parents)
1122 1122 ms = mergemod.mergestate(self)
1123 1123 ms.reset(self['.'].node())
1124 1124
1125 1125 # TODO: if we know which new heads may result from this rollback, pass
1126 1126 # them to destroy(), which will prevent the branchhead cache from being
1127 1127 # invalidated.
1128 1128 self.destroyed()
1129 1129 return 0
1130 1130
1131 1131 def invalidatecaches(self):
1132 1132
1133 1133 if '_tagscache' in vars(self):
1134 1134 # can't use delattr on proxy
1135 1135 del self.__dict__['_tagscache']
1136 1136
1137 1137 self.unfiltered()._branchcaches.clear()
1138 1138 self.invalidatevolatilesets()
1139 1139
1140 1140 def invalidatevolatilesets(self):
1141 1141 self.filteredrevcache.clear()
1142 1142 obsolete.clearobscaches(self)
1143 1143
1144 1144 def invalidatedirstate(self):
1145 1145 '''Invalidates the dirstate, causing the next call to dirstate
1146 1146 to check if it was modified since the last time it was read,
1147 1147 rereading it if it has.
1148 1148
1149 1149 This is different from dirstate.invalidate() in that it doesn't
1150 1150 always reread the dirstate. Use dirstate.invalidate() if you want to
1151 1151 explicitly read the dirstate again (i.e. restoring it to a previous
1152 1152 known good state).'''
1153 1153 if hasunfilteredcache(self, 'dirstate'):
1154 1154 for k in self.dirstate._filecache:
1155 1155 try:
1156 1156 delattr(self.dirstate, k)
1157 1157 except AttributeError:
1158 1158 pass
1159 1159 delattr(self.unfiltered(), 'dirstate')
1160 1160
1161 1161 def invalidate(self):
1162 1162 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1163 1163 for k in self._filecache:
1164 1164 # dirstate is invalidated separately in invalidatedirstate()
1165 1165 if k == 'dirstate':
1166 1166 continue
1167 1167
1168 1168 try:
1169 1169 delattr(unfiltered, k)
1170 1170 except AttributeError:
1171 1171 pass
1172 1172 self.invalidatecaches()
1173 1173 self.store.invalidatecaches()
1174 1174
1175 1175 def invalidateall(self):
1176 1176 '''Fully invalidates both store and non-store parts, causing the
1177 1177 subsequent operation to reread any outside changes.'''
1178 1178 # extension should hook this to invalidate its caches
1179 1179 self.invalidate()
1180 1180 self.invalidatedirstate()
1181 1181
1182 1182 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1183 1183 try:
1184 1184 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1185 1185 except error.LockHeld, inst:
1186 1186 if not wait:
1187 1187 raise
1188 1188 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1189 1189 (desc, inst.locker))
1190 1190 # default to 600 seconds timeout
1191 1191 l = lockmod.lock(vfs, lockname,
1192 1192 int(self.ui.config("ui", "timeout", "600")),
1193 1193 releasefn, desc=desc)
1194 1194 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1195 1195 if acquirefn:
1196 1196 acquirefn()
1197 1197 return l
1198 1198
1199 1199 def _afterlock(self, callback):
1200 1200 """add a callback to be run when the repository is fully unlocked
1201 1201
1202 1202 The callback will be executed when the outermost lock is released
1203 1203 (with wlock being higher level than 'lock')."""
1204 1204 for ref in (self._wlockref, self._lockref):
1205 1205 l = ref and ref()
1206 1206 if l and l.held:
1207 1207 l.postrelease.append(callback)
1208 1208 break
1209 1209 else: # no lock has been found.
1210 1210 callback()
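# Illustrative sketch (annotation, not upstream code): callbacks take no
# arguments and run once the outermost lock is released, e.g.
# (hook name assumed):
#
#     repo._afterlock(lambda: repo.hook('myhook', throw=False))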
1211 1211
1212 1212 def lock(self, wait=True):
1213 1213 '''Lock the repository store (.hg/store) and return a weak reference
1214 1214 to the lock. Use this before modifying the store (e.g. committing or
1215 1215 stripping). If you are opening a transaction, get a lock as well.
1216 1216
1217 1217 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1218 1218 'wlock' first to avoid a dead-lock hazard.'''
1219 1219 l = self._lockref and self._lockref()
1220 1220 if l is not None and l.held:
1221 1221 l.lock()
1222 1222 return l
1223 1223
1224 1224 def unlock():
1225 1225 for k, ce in self._filecache.items():
1226 1226 if k == 'dirstate' or k not in self.__dict__:
1227 1227 continue
1228 1228 ce.refresh()
1229 1229
1230 1230 l = self._lock(self.svfs, "lock", wait, unlock,
1231 1231 self.invalidate, _('repository %s') % self.origroot)
1232 1232 self._lockref = weakref.ref(l)
1233 1233 return l
1234 1234
1235 1235 def wlock(self, wait=True):
1236 1236 '''Lock the non-store parts of the repository (everything under
1237 1237 .hg except .hg/store) and return a weak reference to the lock.
1238 1238
1239 1239 Use this before modifying files in .hg.
1240 1240
1241 1241 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1242 1242 'wlock' first to avoid a dead-lock hazard.'''
1243 1243 l = self._wlockref and self._wlockref()
1244 1244 if l is not None and l.held:
1245 1245 l.lock()
1246 1246 return l
1247 1247
1248 1248 # We do not need to check for non-waiting lock acquisition. Such
1249 1249 # acquisition would not cause a dead-lock as it would just fail.
1250 1250 if wait and (self.ui.configbool('devel', 'all')
1251 1251 or self.ui.configbool('devel', 'check-locks')):
1252 1252 l = self._lockref and self._lockref()
1253 1253 if l is not None and l.held:
1254 1254 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1255 1255
1256 1256 def unlock():
1257 1257 if self.dirstate.pendingparentchange():
1258 1258 self.dirstate.invalidate()
1259 1259 else:
1260 1260 self.dirstate.write()
1261 1261
1262 1262 self._filecache['dirstate'].refresh()
1263 1263
1264 1264 l = self._lock(self.vfs, "wlock", wait, unlock,
1265 1265 self.invalidatedirstate, _('working directory of %s') %
1266 1266 self.origroot)
1267 1267 self._wlockref = weakref.ref(l)
1268 1268 return l
1269 1269
1270 1270 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1271 1271 """
1272 1272 commit an individual file as part of a larger transaction
1273 1273 """
1274 1274
1275 1275 fname = fctx.path()
1276 1276 fparent1 = manifest1.get(fname, nullid)
1277 1277 fparent2 = manifest2.get(fname, nullid)
1278 1278 if isinstance(fctx, context.filectx):
1279 1279 node = fctx.filenode()
1280 1280 if node in [fparent1, fparent2]:
1281 1281 self.ui.debug('reusing %s filelog entry\n' % fname)
1282 1282 return node
1283 1283
1284 1284 flog = self.file(fname)
1285 1285 meta = {}
1286 1286 copy = fctx.renamed()
1287 1287 if copy and copy[0] != fname:
1288 1288 # Mark the new revision of this file as a copy of another
1289 1289 # file. This copy data will effectively act as a parent
1290 1290 # of this new revision. If this is a merge, the first
1291 1291 # parent will be the nullid (meaning "look up the copy data")
1292 1292 # and the second one will be the other parent. For example:
1293 1293 #
1294 1294 # 0 --- 1 --- 3 rev1 changes file foo
1295 1295 # \ / rev2 renames foo to bar and changes it
1296 1296 # \- 2 -/ rev3 should have bar with all changes and
1297 1297 # should record that bar descends from
1298 1298 # bar in rev2 and foo in rev1
1299 1299 #
1300 1300 # this allows this merge to succeed:
1301 1301 #
1302 1302 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1303 1303 # \ / merging rev3 and rev4 should use bar@rev2
1304 1304 # \- 2 --- 4 as the merge base
1305 1305 #
1306 1306
1307 1307 cfname = copy[0]
1308 1308 crev = manifest1.get(cfname)
1309 1309 newfparent = fparent2
1310 1310
1311 1311 if manifest2: # branch merge
1312 1312 if fparent2 == nullid or crev is None: # copied on remote side
1313 1313 if cfname in manifest2:
1314 1314 crev = manifest2[cfname]
1315 1315 newfparent = fparent1
1316 1316
1317 1317 # Here, we used to search backwards through history to try to find
1318 1318 # where the file copy came from if the source of a copy was not in
1319 1319 # the parent directory. However, this doesn't actually make sense to
1320 1320 # do (what does a copy from something not in your working copy even
1321 1321 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1322 1322 # the user that copy information was dropped, so if they didn't
1323 1323 # expect this outcome it can be fixed, but this is the correct
1324 1324 # behavior in this circumstance.
1325 1325
1326 1326 if crev:
1327 1327 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1328 1328 meta["copy"] = cfname
1329 1329 meta["copyrev"] = hex(crev)
1330 1330 fparent1, fparent2 = nullid, newfparent
1331 1331 else:
1332 1332 self.ui.warn(_("warning: can't find ancestor for '%s' "
1333 1333 "copied from '%s'!\n") % (fname, cfname))
1334 1334
1335 1335 elif fparent1 == nullid:
1336 1336 fparent1, fparent2 = fparent2, nullid
1337 1337 elif fparent2 != nullid:
1338 1338 # is one parent an ancestor of the other?
1339 1339 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1340 1340 if fparent1 in fparentancestors:
1341 1341 fparent1, fparent2 = fparent2, nullid
1342 1342 elif fparent2 in fparentancestors:
1343 1343 fparent2 = nullid
1344 1344
1345 1345 # is the file changed?
1346 1346 text = fctx.data()
1347 1347 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1348 1348 changelist.append(fname)
1349 1349 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1350 1350 # are just the flags changed during merge?
1351 1351 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1352 1352 changelist.append(fname)
1353 1353
1354 1354 return fparent1
1355 1355
1356 1356 @unfilteredmethod
1357 1357 def commit(self, text="", user=None, date=None, match=None, force=False,
1358 1358 editor=False, extra={}):
1359 1359 """Add a new revision to current repository.
1360 1360
1361 1361 Revision information is gathered from the working directory,
1362 1362 match can be used to filter the committed files. If editor is
1363 1363 supplied, it is called to get a commit message.
1364 1364 """
1365 1365
1366 1366 def fail(f, msg):
1367 1367 raise util.Abort('%s: %s' % (f, msg))
1368 1368
1369 1369 if not match:
1370 1370 match = matchmod.always(self.root, '')
1371 1371
1372 1372 if not force:
1373 1373 vdirs = []
1374 1374 match.explicitdir = vdirs.append
1375 1375 match.bad = fail
1376 1376
1377 1377 wlock = self.wlock()
1378 1378 try:
1379 1379 wctx = self[None]
1380 1380 merge = len(wctx.parents()) > 1
1381 1381
1382 1382 if not force and merge and not match.always():
1383 1383 raise util.Abort(_('cannot partially commit a merge '
1384 1384 '(do not specify files or patterns)'))
1385 1385
1386 1386 status = self.status(match=match, clean=force)
1387 1387 if force:
1388 1388 status.modified.extend(status.clean) # mq may commit clean files
1389 1389
1390 1390 # check subrepos
1391 1391 subs = []
1392 1392 commitsubs = set()
1393 1393 newstate = wctx.substate.copy()
1394 1394 # only manage subrepos and .hgsubstate if .hgsub is present
1395 1395 if '.hgsub' in wctx:
1396 1396 # we'll decide whether to track this ourselves, thanks
1397 1397 for c in status.modified, status.added, status.removed:
1398 1398 if '.hgsubstate' in c:
1399 1399 c.remove('.hgsubstate')
1400 1400
1401 1401 # compare current state to last committed state
1402 1402 # build new substate based on last committed state
1403 1403 oldstate = wctx.p1().substate
1404 1404 for s in sorted(newstate.keys()):
1405 1405 if not match(s):
1406 1406 # ignore working copy, use old state if present
1407 1407 if s in oldstate:
1408 1408 newstate[s] = oldstate[s]
1409 1409 continue
1410 1410 if not force:
1411 1411 raise util.Abort(
1412 1412 _("commit with new subrepo %s excluded") % s)
1413 1413 dirtyreason = wctx.sub(s).dirtyreason(True)
1414 1414 if dirtyreason:
1415 1415 if not self.ui.configbool('ui', 'commitsubrepos'):
1416 1416 raise util.Abort(dirtyreason,
1417 1417 hint=_("use --subrepos for recursive commit"))
1418 1418 subs.append(s)
1419 1419 commitsubs.add(s)
1420 1420 else:
1421 1421 bs = wctx.sub(s).basestate()
1422 1422 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1423 1423 if oldstate.get(s, (None, None, None))[1] != bs:
1424 1424 subs.append(s)
1425 1425
1426 1426 # check for removed subrepos
1427 1427 for p in wctx.parents():
1428 1428 r = [s for s in p.substate if s not in newstate]
1429 1429 subs += [s for s in r if match(s)]
1430 1430 if subs:
1431 1431 if (not match('.hgsub') and
1432 1432 '.hgsub' in (wctx.modified() + wctx.added())):
1433 1433 raise util.Abort(
1434 1434 _("can't commit subrepos without .hgsub"))
1435 1435 status.modified.insert(0, '.hgsubstate')
1436 1436
1437 1437 elif '.hgsub' in status.removed:
1438 1438 # clean up .hgsubstate when .hgsub is removed
1439 1439 if ('.hgsubstate' in wctx and
1440 1440 '.hgsubstate' not in (status.modified + status.added +
1441 1441 status.removed)):
1442 1442 status.removed.insert(0, '.hgsubstate')
1443 1443
1444 1444 # make sure all explicit patterns are matched
1445 1445 if not force and match.files():
1446 1446 matched = set(status.modified + status.added + status.removed)
1447 1447
1448 1448 for f in match.files():
1449 1449 f = self.dirstate.normalize(f)
1450 1450 if f == '.' or f in matched or f in wctx.substate:
1451 1451 continue
1452 1452 if f in status.deleted:
1453 1453 fail(f, _('file not found!'))
1454 1454 if f in vdirs: # visited directory
1455 1455 d = f + '/'
1456 1456 for mf in matched:
1457 1457 if mf.startswith(d):
1458 1458 break
1459 1459 else:
1460 1460 fail(f, _("no match under directory!"))
1461 1461 elif f not in self.dirstate:
1462 1462 fail(f, _("file not tracked!"))
1463 1463
1464 1464 cctx = context.workingcommitctx(self, status,
1465 1465 text, user, date, extra)
1466 1466
1467 1467 if (not force and not extra.get("close") and not merge
1468 1468 and not cctx.files()
1469 1469 and wctx.branch() == wctx.p1().branch()):
1470 1470 return None
1471 1471
1472 1472 if merge and cctx.deleted():
1473 1473 raise util.Abort(_("cannot commit merge with missing files"))
1474 1474
1475 1475 ms = mergemod.mergestate(self)
1476 1476 for f in status.modified:
1477 1477 if f in ms and ms[f] == 'u':
1478 1478 raise util.Abort(_('unresolved merge conflicts '
1479 1479 '(see "hg help resolve")'))
1480 1480
1481 1481 if editor:
1482 1482 cctx._text = editor(self, cctx, subs)
1483 1483 edited = (text != cctx._text)
1484 1484
1485 1485 # Save commit message in case this transaction gets rolled back
1486 1486 # (e.g. by a pretxncommit hook). Leave the content alone on
1487 1487 # the assumption that the user will use the same editor again.
1488 1488 msgfn = self.savecommitmessage(cctx._text)
1489 1489
1490 1490 # commit subs and write new state
1491 1491 if subs:
1492 1492 for s in sorted(commitsubs):
1493 1493 sub = wctx.sub(s)
1494 1494 self.ui.status(_('committing subrepository %s\n') %
1495 1495 subrepo.subrelpath(sub))
1496 1496 sr = sub.commit(cctx._text, user, date)
1497 1497 newstate[s] = (newstate[s][0], sr)
1498 1498 subrepo.writestate(self, newstate)
1499 1499
1500 1500 p1, p2 = self.dirstate.parents()
1501 1501 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1502 1502 try:
1503 1503 self.hook("precommit", throw=True, parent1=hookp1,
1504 1504 parent2=hookp2)
1505 1505 ret = self.commitctx(cctx, True)
1506 1506 except: # re-raises
1507 1507 if edited:
1508 1508 self.ui.write(
1509 1509 _('note: commit message saved in %s\n') % msgfn)
1510 1510 raise
1511 1511
1512 1512 # update bookmarks, dirstate and mergestate
1513 1513 bookmarks.update(self, [p1, p2], ret)
1514 1514 cctx.markcommitted(ret)
1515 1515 ms.reset()
1516 1516 finally:
1517 1517 wlock.release()
1518 1518
1519 1519 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1520 1520 # hack for commands that use a temporary commit (e.g. histedit):
1521 1521 # the temporary commit may have been stripped before the hook runs
1522 1522 if node in self:
1523 1523 self.hook("commit", node=node, parent1=parent1,
1524 1524 parent2=parent2)
1525 1525 self._afterlock(commithook)
1526 1526 return ret
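# Illustrative sketch (annotation, not upstream code): a minimal caller,
# relying on the defaults above:
#
#     node = repo.commit(text='fix encoding bug', user='alice')
#     if node is None:
#         pass  # nothing changed, no commit was created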
1527 1527
1528 1528 @unfilteredmethod
1529 1529 def commitctx(self, ctx, error=False):
1530 1530 """Add a new revision to current repository.
1531 1531 Revision information is passed via the context argument.
1532 1532 """
1533 1533
1534 1534 tr = None
1535 1535 p1, p2 = ctx.p1(), ctx.p2()
1536 1536 user = ctx.user()
1537 1537
1538 1538 lock = self.lock()
1539 1539 try:
1540 1540 tr = self.transaction("commit")
1541 1541 trp = weakref.proxy(tr)
1542 1542
1543 1543 if ctx.files():
1544 1544 m1 = p1.manifest()
1545 1545 m2 = p2.manifest()
1546 1546 m = m1.copy()
1547 1547
1548 1548 # check in files
1549 1549 added = []
1550 1550 changed = []
1551 1551 removed = list(ctx.removed())
1552 1552 linkrev = len(self)
1553 1553 self.ui.note(_("committing files:\n"))
1554 1554 for f in sorted(ctx.modified() + ctx.added()):
1555 1555 self.ui.note(f + "\n")
1556 1556 try:
1557 1557 fctx = ctx[f]
1558 1558 if fctx is None:
1559 1559 removed.append(f)
1560 1560 else:
1561 1561 added.append(f)
1562 1562 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1563 1563 trp, changed)
1564 1564 m.setflag(f, fctx.flags())
1565 1565 except OSError, inst:
1566 1566 self.ui.warn(_("trouble committing %s!\n") % f)
1567 1567 raise
1568 1568 except IOError, inst:
1569 1569 errcode = getattr(inst, 'errno', errno.ENOENT)
1570 1570 if error or errcode and errcode != errno.ENOENT:
1571 1571 self.ui.warn(_("trouble committing %s!\n") % f)
1572 1572 raise
1573 1573
1574 1574 # update manifest
1575 1575 self.ui.note(_("committing manifest\n"))
1576 1576 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1577 1577 drop = [f for f in removed if f in m]
1578 1578 for f in drop:
1579 1579 del m[f]
1580 1580 mn = self.manifest.add(m, trp, linkrev,
1581 1581 p1.manifestnode(), p2.manifestnode(),
1582 1582 added, drop)
1583 1583 files = changed + removed
1584 1584 else:
1585 1585 mn = p1.manifestnode()
1586 1586 files = []
1587 1587
1588 1588 # update changelog
1589 1589 self.ui.note(_("committing changelog\n"))
1590 1590 self.changelog.delayupdate(tr)
1591 1591 n = self.changelog.add(mn, files, ctx.description(),
1592 1592 trp, p1.node(), p2.node(),
1593 1593 user, ctx.date(), ctx.extra().copy())
1594 1594 p = lambda: tr.writepending() and self.root or ""
1595 1595 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1596 1596 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1597 1597 parent2=xp2, pending=p)
1598 1598 # set the new commit in its proper phase
1599 1599 targetphase = subrepo.newcommitphase(self.ui, ctx)
1600 1600 if targetphase:
1601 1601 # retracting the boundary does not alter parent changesets:
1602 1602 # if a parent has a higher phase, the resulting phase will
1603 1603 # be compliant anyway
1604 1604 #
1605 1605 # if the minimal phase is 0 we don't need to retract anything
1606 1606 phases.retractboundary(self, tr, targetphase, [n])
1607 1607 tr.close()
1608 1608 branchmap.updatecache(self.filtered('served'))
1609 1609 return n
1610 1610 finally:
1611 1611 if tr:
1612 1612 tr.release()
1613 1613 lock.release()
1614 1614
1615 1615 @unfilteredmethod
1616 1616 def destroying(self):
1617 1617 '''Inform the repository that nodes are about to be destroyed.
1618 1618 Intended for use by strip and rollback, so there's a common
1619 1619 place for anything that has to be done before destroying history.
1620 1620
1621 1621 This is mostly useful for saving state that is in memory and waiting
1622 1622 to be flushed when the current lock is released. Because a call to
1623 1623 destroyed is imminent, the repo will be invalidated, causing those
1624 1624 changes to stay in memory (waiting for the next unlock) or vanish
1625 1625 completely.
1626 1626 '''
1627 1627 # When using the same lock to commit and strip, the phasecache is left
1628 1628 # dirty after committing. Then when we strip, the repo is invalidated,
1629 1629 # causing those changes to disappear.
1630 1630 if '_phasecache' in vars(self):
1631 1631 self._phasecache.write()
1632 1632
1633 1633 @unfilteredmethod
1634 1634 def destroyed(self):
1635 1635 '''Inform the repository that nodes have been destroyed.
1636 1636 Intended for use by strip and rollback, so there's a common
1637 1637 place for anything that has to be done after destroying history.
1638 1638 '''
1639 1639 # When one tries to:
1640 1640 # 1) destroy nodes thus calling this method (e.g. strip)
1641 1641 # 2) use phasecache somewhere (e.g. commit)
1642 1642 #
1643 1643 # then 2) will fail because the phasecache contains nodes that were
1644 1644 # removed. We can either remove phasecache from the filecache,
1645 1645 # causing it to reload next time it is accessed, or simply filter
1646 1646 # the removed nodes now and write the updated cache.
1647 1647 self._phasecache.filterunknown(self)
1648 1648 self._phasecache.write()
1649 1649
1650 1650 # update the 'served' branch cache to help read-only server processes.
1651 1651 # Thanks to branchcache collaboration this is done from the nearest
1652 1652 # filtered subset and it is expected to be fast.
1653 1653 branchmap.updatecache(self.filtered('served'))
1654 1654
1655 1655 # Ensure the persistent tag cache is updated. Doing it now
1656 1656 # means that the tag cache only has to worry about destroyed
1657 1657 # heads immediately after a strip/rollback. That in turn
1658 1658 # guarantees that "cachetip == currenttip" (comparing both rev
1659 1659 # and node) always means no nodes have been added or destroyed.
1660 1660
1661 1661 # XXX this is suboptimal when qrefresh'ing: we strip the current
1662 1662 # head, refresh the tag cache, then immediately add a new head.
1663 1663 # But I think doing it this way is necessary for the "instant
1664 1664 # tag cache retrieval" case to work.
1665 1665 self.invalidate()
1666 1666
1667 1667 def walk(self, match, node=None):
1668 1668 '''
1669 1669 walk recursively through the directory tree or a given
1670 1670 changeset, finding all files matched by the match
1671 1671 function
1672 1672 '''
1673 1673 return self[node].walk(match)
1674 1674
1675 1675 def status(self, node1='.', node2=None, match=None,
1676 1676 ignored=False, clean=False, unknown=False,
1677 1677 listsubrepos=False):
1678 1678 '''a convenience method that calls node1.status(node2)'''
1679 1679 return self[node1].status(node2, match, ignored, clean, unknown,
1680 1680 listsubrepos)
1681 1681
1682 1682 def heads(self, start=None):
1683 1683 heads = self.changelog.heads(start)
1684 1684 # sort the output in rev descending order
1685 1685 return sorted(heads, key=self.changelog.rev, reverse=True)
1686 1686
1687 1687 def branchheads(self, branch=None, start=None, closed=False):
1688 1688 '''return a (possibly filtered) list of heads for the given branch
1689 1689
1690 1690 Heads are returned in topological order, from newest to oldest.
1691 1691 If branch is None, use the dirstate branch.
1692 1692 If start is not None, return only heads reachable from start.
1693 1693 If closed is True, return heads that are marked as closed as well.
1694 1694 '''
1695 1695 if branch is None:
1696 1696 branch = self[None].branch()
1697 1697 branches = self.branchmap()
1698 1698 if branch not in branches:
1699 1699 return []
1700 1700 # the cache returns heads ordered lowest to highest
1701 1701 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1702 1702 if start is not None:
1703 1703 # filter out the heads that cannot be reached from startrev
1704 1704 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1705 1705 bheads = [h for h in bheads if h in fbheads]
1706 1706 return bheads
1707 1707
1708 1708 def branches(self, nodes):
1709 1709 if not nodes:
1710 1710 nodes = [self.changelog.tip()]
1711 1711 b = []
1712 1712 for n in nodes:
1713 1713 t = n
1714 1714 while True:
1715 1715 p = self.changelog.parents(n)
1716 1716 if p[1] != nullid or p[0] == nullid:
1717 1717 b.append((t, n, p[0], p[1]))
1718 1718 break
1719 1719 n = p[0]
1720 1720 return b
1721 1721
1722 1722 def between(self, pairs):
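"""for each (top, bottom) pair, return the nodes found along the
first-parent chain from top toward bottom, sampled at exponentially
growing distances (1, 2, 4, ...) from top; top and bottom themselves
are excluded"""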
1723 1723 r = []
1724 1724
1725 1725 for top, bottom in pairs:
1726 1726 n, l, i = top, [], 0
1727 1727 f = 1
1728 1728
1729 1729 while n != bottom and n != nullid:
1730 1730 p = self.changelog.parents(n)[0]
1731 1731 if i == f:
1732 1732 l.append(n)
1733 1733 f = f * 2
1734 1734 n = p
1735 1735 i += 1
1736 1736
1737 1737 r.append(l)
1738 1738
1739 1739 return r
1740 1740
1741 1741 def checkpush(self, pushop):
1742 1742 """Extensions can override this function if additional checks have
1743 1743 to be performed before pushing, or call it if they override the
1744 1744 push command.
1745 1745 """
1746 1746 pass
1747 1747
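# A hypothetical extension could register a pre-push check like this
# (the names are illustrative, not part of the API):
#
#   def checkoutgoing(repo, remote, outgoing):
#       ...  # raise util.Abort to veto the push
#   repo.prepushoutgoinghooks.add('myext', checkoutgoing)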
1748 1748 @unfilteredpropertycache
1749 1749 def prepushoutgoinghooks(self):
1750 1750 """Return util.hooks consists of "(repo, remote, outgoing)"
1751 1751 functions, which are called before pushing changesets.
1752 1752 """
1753 1753 return util.hooks()
1754 1754
1755 def stream_in(self, remote, requirements):
1755 def stream_in(self, remote, remotereqs):
1756 1756 lock = self.lock()
1757 1757 try:
1758 1758 # Save remote branchmap. We will use it later
1759 1759 # to speed up branchcache creation
1760 1760 rbranchmap = None
1761 1761 if remote.capable("branchmap"):
1762 1762 rbranchmap = remote.branchmap()
1763 1763
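# The stream_out wire format, as consumed below: a numeric status
# line, then '<total files> <total bytes>', then for each file a
# '<name>\0<size>' header followed by <size> bytes of raw store data.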
1764 1764 fp = remote.stream_out()
1765 1765 l = fp.readline()
1766 1766 try:
1767 1767 resp = int(l)
1768 1768 except ValueError:
1769 1769 raise error.ResponseError(
1770 1770 _('unexpected response from remote server:'), l)
1771 1771 if resp == 1:
1772 1772 raise util.Abort(_('operation forbidden by server'))
1773 1773 elif resp == 2:
1774 1774 raise util.Abort(_('locking the remote repository failed'))
1775 1775 elif resp != 0:
1776 1776 raise util.Abort(_('the server sent an unknown error code'))
1777 1777 self.ui.status(_('streaming all changes\n'))
1778 1778 l = fp.readline()
1779 1779 try:
1780 1780 total_files, total_bytes = map(int, l.split(' ', 1))
1781 1781 except (ValueError, TypeError):
1782 1782 raise error.ResponseError(
1783 1783 _('unexpected response from remote server:'), l)
1784 1784 self.ui.status(_('%d files to transfer, %s of data\n') %
1785 1785 (total_files, util.bytecount(total_bytes)))
1786 1786 handled_bytes = 0
1787 1787 self.ui.progress(_('clone'), 0, total=total_bytes)
1788 1788 start = time.time()
1789 1789
1790 1790 tr = self.transaction(_('clone'))
1791 1791 try:
1792 1792 for i in xrange(total_files):
1793 1793 # XXX doesn't support '\n' or '\r' in filenames
1794 1794 l = fp.readline()
1795 1795 try:
1796 1796 name, size = l.split('\0', 1)
1797 1797 size = int(size)
1798 1798 except (ValueError, TypeError):
1799 1799 raise error.ResponseError(
1800 1800 _('unexpected response from remote server:'), l)
1801 1801 if self.ui.debugflag:
1802 1802 self.ui.debug('adding %s (%s)\n' %
1803 1803 (name, util.bytecount(size)))
1804 1804 # for backwards compat, name was partially encoded
1805 1805 ofp = self.svfs(store.decodedir(name), 'w')
1806 1806 for chunk in util.filechunkiter(fp, limit=size):
1807 1807 handled_bytes += len(chunk)
1808 1808 self.ui.progress(_('clone'), handled_bytes,
1809 1809 total=total_bytes)
1810 1810 ofp.write(chunk)
1811 1811 ofp.close()
1812 1812 tr.close()
1813 1813 finally:
1814 1814 tr.release()
1815 1815
1816 1816 # Writing straight to files circumvented the in-memory caches
1817 1817 self.invalidate()
1818 1818
1819 1819 elapsed = time.time() - start
1820 1820 if elapsed <= 0:
1821 1821 elapsed = 0.001
1822 1822 self.ui.progress(_('clone'), None)
1823 1823 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1824 1824 (util.bytecount(total_bytes), elapsed,
1825 1825 util.bytecount(total_bytes / elapsed)))
1826 1826
1827 1827 # new requirements = old non-format requirements +
1828 # new format-related
1828 # new format-related remote requirements
1829 1829 # requirements from the streamed-in repository
1830 self.requirements = requirements | (
1830 self.requirements = remotereqs | (
1831 1831 self.requirements - self.supportedformats)
1832 1832 self._applyopenerreqs()
1833 1833 self._writerequirements()
1834 1834
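# Seed the local branch cache from the branchmap we saved before
# streaming, so it does not have to be recomputed from scratch.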
1835 1835 if rbranchmap:
1836 1836 rbheads = []
1837 1837 closed = []
1838 1838 for bheads in rbranchmap.itervalues():
1839 1839 rbheads.extend(bheads)
1840 1840 for h in bheads:
1841 1841 r = self.changelog.rev(h)
1842 1842 b, c = self.changelog.branchinfo(r)
1843 1843 if c:
1844 1844 closed.append(h)
1845 1845
1846 1846 if rbheads:
1847 1847 rtiprev = max((int(self.changelog.rev(node))
1848 1848 for node in rbheads))
1849 1849 cache = branchmap.branchcache(rbranchmap,
1850 1850 self[rtiprev].node(),
1851 1851 rtiprev,
1852 1852 closednodes=closed)
1853 1853 # Try to stick it as low as possible:
1854 1854 # filters above 'served' are unlikely to be fetched from a clone
1855 1855 for candidate in ('base', 'immutable', 'served'):
1856 1856 rview = self.filtered(candidate)
1857 1857 if cache.validfor(rview):
1858 1858 self._branchcaches[candidate] = cache
1859 1859 cache.write(rview)
1860 1860 break
1861 1861 self.invalidate()
1862 1862 return len(self.heads()) + 1
1863 1863 finally:
1864 1864 lock.release()
1865 1865
1866 1866 def clone(self, remote, heads=[], stream=None):
1867 1867 '''clone remote repository.
1868 1868
1869 1869 keyword arguments:
1870 1870 heads: list of revs to clone (forces use of pull)
1871 1871 stream: use streaming clone if possible'''
1872 1872
1873 1873 # now, all clients that can request uncompressed clones can
1874 1874 # read repo formats supported by all servers that can serve
1875 1875 # them.
1876 1876
1877 1877 # if revlog format changes, client will have to check version
1878 1878 # and format flags on "stream" capability, and use
1879 1879 # uncompressed only if compatible.
1880 1880
1881 1881 if stream is None:
1882 1882 # if the server explicitly prefers to stream (for fast LANs)
1883 1883 stream = remote.capable('stream-preferred')
1884 1884
1885 1885 if stream and not heads:
1886 1886 # 'stream' means remote revlog format is revlogv1 only
1887 1887 if remote.capable('stream'):
1888 1888 self.stream_in(remote, set(('revlogv1',)))
1889 1889 else:
1890 1890 # otherwise, 'streamreqs' contains the remote revlog format
1891 1891 streamreqs = remote.capable('streamreqs')
1892 1892 if streamreqs:
1893 1893 streamreqs = set(streamreqs.split(','))
1894 1894 # if we support it, stream in and adjust our requirements
1895 1895 if not streamreqs - self.supportedformats:
1896 1896 self.stream_in(remote, streamreqs)
1897 1897
1898 1898 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1899 1899 try:
1900 1900 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1901 1901 ret = exchange.pull(self, remote, heads).cgresult
1902 1902 finally:
1903 1903 self.ui.restoreconfig(quiet)
1904 1904 return ret
1905 1905
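# A minimal sketch of the pushkey round trip (the bookmark name and
# node are hypothetical):
#
#   old = repo.listkeys('bookmarks').get('mybook', '')
#   repo.pushkey('bookmarks', 'mybook', old, newnodehex)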
1906 1906 def pushkey(self, namespace, key, old, new):
1907 1907 try:
1908 1908 tr = self.currenttransaction()
1909 1909 hookargs = {}
1910 1910 if tr is not None:
1911 1911 hookargs.update(tr.hookargs)
1912 1912 pending = lambda: tr.writepending() and self.root or ""
1913 1913 hookargs['pending'] = pending
1914 1914 hookargs['namespace'] = namespace
1915 1915 hookargs['key'] = key
1916 1916 hookargs['old'] = old
1917 1917 hookargs['new'] = new
1918 1918 self.hook('prepushkey', throw=True, **hookargs)
1919 1919 except error.HookAbort, exc:
1920 1920 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1921 1921 if exc.hint:
1922 1922 self.ui.write_err(_("(%s)\n") % exc.hint)
1923 1923 return False
1924 1924 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1925 1925 ret = pushkey.push(self, namespace, key, old, new)
1926 1926 def runhook():
1927 1927 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1928 1928 ret=ret)
1929 1929 self._afterlock(runhook)
1930 1930 return ret
1931 1931
1932 1932 def listkeys(self, namespace):
1933 1933 self.hook('prelistkeys', throw=True, namespace=namespace)
1934 1934 self.ui.debug('listing keys for "%s"\n' % namespace)
1935 1935 values = pushkey.list(self, namespace)
1936 1936 self.hook('listkeys', namespace=namespace, values=values)
1937 1937 return values
1938 1938
1939 1939 def debugwireargs(self, one, two, three=None, four=None, five=None):
1940 1940 '''used to test argument passing over the wire'''
1941 1941 return "%s %s %s %s %s" % (one, two, three, four, five)
1942 1942
1943 1943 def savecommitmessage(self, text):
1944 1944 fp = self.vfs('last-message.txt', 'wb')
1945 1945 try:
1946 1946 fp.write(text)
1947 1947 finally:
1948 1948 fp.close()
1949 1949 return self.pathto(fp.name[len(self.root) + 1:])
1950 1950
1951 1951 # used to avoid circular references so destructors work
1952 1952 def aftertrans(files):
1953 1953 renamefiles = [tuple(t) for t in files]
1954 1954 def a():
1955 1955 for vfs, src, dest in renamefiles:
1956 1956 try:
1957 1957 vfs.rename(src, dest)
1958 1958 except OSError: # journal file does not yet exist
1959 1959 pass
1960 1960 return a
1961 1961
1962 1962 def undoname(fn):
1963 1963 base, name = os.path.split(fn)
1964 1964 assert name.startswith('journal')
1965 1965 return os.path.join(base, name.replace('journal', 'undo', 1))
1966 1966
1967 1967 def instance(ui, path, create):
1968 1968 return localrepository(ui, util.urllocalpath(path), create)
1969 1969
1970 1970 def islocal(path):
1971 1971 return True