##// END OF EJS Templates
streamclone: move streaming clone logic from localrepo...
Gregory Szorc -
r26445:f134fb33 default
parent child Browse files
Show More
@@ -1,1897 +1,1873 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 import streamclone
23 23 propertycache = util.propertycache
24 24 filecache = scmutil.filecache
25 25
class repofilecache(filecache):
    """Filecache descriptor that always resolves against the unfiltered repo.

    Repo filecache usage backs logic that must be shared by every filtered
    view, so reads, writes and invalidation all target repo.unfiltered().
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
36 36
class storecache(repofilecache):
    """A repofilecache whose tracked files live in the store (.hg/store)."""

    def join(self, obj, fname):
        # Resolve cache filenames relative to the store, not .hg itself.
        return obj.sjoin(fname)
41 41
class unfilteredpropertycache(propertycache):
    """Propertycache computed on — and cached by — the unfiltered repo only."""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # Filtered view: forward to the value cached on the unfiltered
            # repo instead of caching a per-view copy.
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
50 50
class filteredpropertycache(propertycache):
    """Propertycache whose value may legitimately differ per filtered view."""

    def cachevalue(self, obj, value):
        # Store straight into the instance dict, bypassing descriptor logic.
        object.__setattr__(obj, self.name, value)
56 56
57 57
def hasunfilteredcache(repo, name):
    """Report whether the unfiltered repo already caches a value for *name*."""
    unfi = repo.unfiltered()
    return name in vars(unfi)
61 61
def unfilteredmethod(orig):
    """Decorator forcing *orig* to always run against the unfiltered repo."""
    def inner(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
67 67
# Wire-protocol capabilities advertised by modern local peers; legacy peers
# additionally understand the old changegroupsubset command.
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
71 71
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # Expose the 'served' view so secret/hidden changesets stay private
        # even when the peer wraps a local repository object.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # Local peers hand back the underlying repo object directly.
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        """Return a bundle covering the requested heads/common range."""
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # Translate a push race into the wire-level response error.
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
174 174
class locallegacypeer(localpeer):
    '''Peer that also implements the legacy wire methods.

    Used by tests exercising peers with restricted capabilities.'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        # Legacy command: branch info for the given nodes.
        return self._repo.branches(nodes)

    def between(self, pairs):
        # Legacy command: intermediate nodes between each (top, bottom) pair.
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        # Legacy full-changegroup request.
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        # Legacy partial-changegroup request.
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
193 193
class localrepository(object):

    # Requirements that affect how revision data is stored on disk.
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # Every requirement this class understands (storage + layout flags).
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements forwarded to the store opener as revlog options.
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the repoview filter applied to this instance (None = unfiltered).
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # Every repository requires at least the revlogv1 format.
        return ['revlogv1']
209 209
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*."""
        self.requirements = set()
        # Working-directory vfs, rooted at the checkout.
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # vfs rooted at .hg (metadata outside the store).
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # Missing or unreadable hgrc: proceed with the base configuration.
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            # Only run feature-setup hooks owned by currently-enabled
            # extensions.
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                self.requirements.update(self._baserequirements(create))
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    self.requirements.add("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        self.requirements.add("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            self.requirements.add('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                # experimental config: format.generaldelta
                if self.ui.configbool('format', 'generaldelta', False):
                    self.requirements.add("generaldelta")
                if self.ui.configbool('experimental', 'treemanifest', False):
                    self.requirements.add("treemanifest")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    self.requirements.add("manifestv2")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # Existing repo: load its requirements, rejecting unsupported ones.
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        # Resolve share support: .hg/sharedpath redirects the store elsewhere.
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
329 329
330 330 def close(self):
331 331 self._writecaches()
332 332
333 333 def _writecaches(self):
334 334 if self._revbranchcache:
335 335 self._revbranchcache.write()
336 336
337 337 def _restrictcapabilities(self, caps):
338 338 if self.ui.configbool('experimental', 'bundle2-advertise', True):
339 339 caps = set(caps)
340 340 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
341 341 caps.add('bundle2=' + urllib.quote(capsblob))
342 342 return caps
343 343
344 344 def _applyopenerreqs(self):
345 345 self.svfs.options = dict((r, 1) for r in self.requirements
346 346 if r in self.openerreqs)
347 347 # experimental config: format.chunkcachesize
348 348 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
349 349 if chunkcachesize is not None:
350 350 self.svfs.options['chunkcachesize'] = chunkcachesize
351 351 # experimental config: format.maxchainlen
352 352 maxchainlen = self.ui.configint('format', 'maxchainlen')
353 353 if maxchainlen is not None:
354 354 self.svfs.options['maxchainlen'] = maxchainlen
355 355 # experimental config: format.manifestcachesize
356 356 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
357 357 if manifestcachesize is not None:
358 358 self.svfs.options['manifestcachesize'] = manifestcachesize
359 359 # experimental config: format.aggressivemergedeltas
360 360 aggressivemergedeltas = self.ui.configbool('format',
361 361 'aggressivemergedeltas', False)
362 362 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
363 363
364 364 def _writerequirements(self):
365 365 scmutil.writerequires(self.vfs, self.requirements)
366 366
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        # Only paths under our own root can possibly be nested repos.
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # Walk prefixes from longest to shortest, looking for a subrepo entry.
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # The path is itself a registered subrepo: legal.
                    return True
                else:
                    # Path is inside a subrepo; delegate the check to it.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
404 404
405 405 def peer(self):
406 406 return localpeer(self) # not cached to avoid reference cycle
407 407
408 408 def unfiltered(self):
409 409 """Return unfiltered version of the repository
410 410
411 411 Intended to be overwritten by filtered repo."""
412 412 return self
413 413
414 414 def filtered(self, name):
415 415 """Return a filtered version of a repository"""
416 416 # build a new class with the mixin and the current class
417 417 # (possibly subclass of the repo)
418 418 class proxycls(repoview.repoview, self.unfiltered().__class__):
419 419 pass
420 420 return proxycls(self, name)
421 421
422 422 @repofilecache('bookmarks')
423 423 def _bookmarks(self):
424 424 return bookmarks.bmstore(self)
425 425
426 426 @repofilecache('bookmarks.current')
427 427 def _activebookmark(self):
428 428 return bookmarks.readactive(self)
429 429
430 430 def bookmarkheads(self, bookmark):
431 431 name = bookmark.split('@', 1)[0]
432 432 heads = []
433 433 for mark, n in self._bookmarks.iteritems():
434 434 if mark.split('@', 1)[0] == name:
435 435 heads.append(n)
436 436 return heads
437 437
438 438 # _phaserevs and _phasesets depend on changelog. what we need is to
439 439 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
440 440 # can't be easily expressed in filecache mechanism.
441 441 @storecache('phaseroots', '00changelog.i')
442 442 def _phasecache(self):
443 443 return phases.phasecache(self, self._phasedefaults)
444 444
445 445 @storecache('obsstore')
446 446 def obsstore(self):
447 447 # read default format for new obsstore.
448 448 # developer config: format.obsstore-version
449 449 defaultformat = self.ui.configint('format', 'obsstore-version', None)
450 450 # rely on obsstore class default when possible.
451 451 kwargs = {}
452 452 if defaultformat is not None:
453 453 kwargs['defaultformat'] = defaultformat
454 454 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
455 455 store = obsolete.obsstore(self.svfs, readonly=readonly,
456 456 **kwargs)
457 457 if store and readonly:
458 458 self.ui.warn(
459 459 _('obsolete feature not enabled but %i markers found!\n')
460 460 % len(list(store)))
461 461 return store
462 462
463 463 @storecache('00changelog.i')
464 464 def changelog(self):
465 465 c = changelog.changelog(self.svfs)
466 466 if 'HG_PENDING' in os.environ:
467 467 p = os.environ['HG_PENDING']
468 468 if p.startswith(self.root):
469 469 c.readpending('00changelog.i.a')
470 470 return c
471 471
472 472 @storecache('00manifest.i')
473 473 def manifest(self):
474 474 return manifest.manifest(self.svfs)
475 475
476 476 def dirlog(self, dir):
477 477 return self.manifest.dirlog(dir)
478 478
479 479 @repofilecache('dirstate')
480 480 def dirstate(self):
481 481 return dirstate.dirstate(self.vfs, self.ui, self.root,
482 482 self._dirstatevalidate)
483 483
484 484 def _dirstatevalidate(self, node):
485 485 try:
486 486 self.changelog.rev(node)
487 487 return node
488 488 except error.LookupError:
489 489 if not self._dirstatevalidatewarned:
490 490 self._dirstatevalidatewarned = True
491 491 self.ui.warn(_("warning: ignoring unknown"
492 492 " working parent %s!\n") % short(node))
493 493 return nullid
494 494
495 495 def __getitem__(self, changeid):
496 496 if changeid is None or changeid == wdirrev:
497 497 return context.workingctx(self)
498 498 if isinstance(changeid, slice):
499 499 return [context.changectx(self, i)
500 500 for i in xrange(*changeid.indices(len(self)))
501 501 if i not in self.changelog.filteredrevs]
502 502 return context.changectx(self, changeid)
503 503
504 504 def __contains__(self, changeid):
505 505 try:
506 506 self[changeid]
507 507 return True
508 508 except error.RepoLookupError:
509 509 return False
510 510
511 511 def __nonzero__(self):
512 512 return True
513 513
514 514 def __len__(self):
515 515 return len(self.changelog)
516 516
517 517 def __iter__(self):
518 518 return iter(self.changelog)
519 519
520 520 def revs(self, expr, *args):
521 521 '''Return a list of revisions matching the given revset'''
522 522 expr = revset.formatspec(expr, *args)
523 523 m = revset.match(None, expr)
524 524 return m(self)
525 525
526 526 def set(self, expr, *args):
527 527 '''
528 528 Yield a context for each matching revision, after doing arg
529 529 replacement via revset.formatspec
530 530 '''
531 531 for r in self.revs(expr, *args):
532 532 yield self[r]
533 533
534 534 def url(self):
535 535 return 'file:' + self.root
536 536
537 537 def hook(self, name, throw=False, **args):
538 538 """Call a hook, passing this repo instance.
539 539
540 540 This a convenience method to aid invoking hooks. Extensions likely
541 541 won't call this unless they have registered a custom hook or are
542 542 replacing code that is expected to call a hook.
543 543 """
544 544 return hook.hook(self.ui, self, name, throw, **args)
545 545
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Record *names* as tags for *node* (shared worker behind tag()).

        Local tags go to .hg/localtags; global tags are appended to .hgtags
        and committed. Returns the tagging changeset node for global tags,
        None for local ones.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            # pretag hooks may veto the operation by raising.
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append tag lines, keeping the previous entry for any renamed
            # tag so history of the tag is preserved in the file.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            # Open for read/write if present; fall back to append-create.
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # Commit only the .hgtags change.
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
619 619
620 620 def tag(self, names, node, message, local, user, date, editor=False):
621 621 '''tag a revision with one or more symbolic names.
622 622
623 623 names is a list of strings or, when adding a single tag, names may be a
624 624 string.
625 625
626 626 if local is True, the tags are stored in a per-repository file.
627 627 otherwise, they are stored in the .hgtags file, and a new
628 628 changeset is committed with the change.
629 629
630 630 keyword arguments:
631 631
632 632 local: whether to store tags in non-version-controlled file
633 633 (default False)
634 634
635 635 message: commit message to use if committing
636 636
637 637 user: name of user to use if committing
638 638
639 639 date: date tuple to use if committing'''
640 640
641 641 if not local:
642 642 m = matchmod.exact(self.root, '', ['.hgtags'])
643 643 if any(self.status(match=m, unknown=True, ignored=True)):
644 644 raise util.Abort(_('working copy of .hgtags is changed'),
645 645 hint=_('please commit .hgtags manually'))
646 646
647 647 self.tags() # instantiate the cache
648 648 self._tag(names, node, message, local, user, date, editor=editor)
649 649
650 650 @filteredpropertycache
651 651 def _tagscache(self):
652 652 '''Returns a tagscache object that contains various tags related
653 653 caches.'''
654 654
655 655 # This simplifies its cache management by having one decorated
656 656 # function (this one) and the rest simply fetch things from it.
657 657 class tagscache(object):
658 658 def __init__(self):
659 659 # These two define the set of tags for this repository. tags
660 660 # maps tag name to node; tagtypes maps tag name to 'global' or
661 661 # 'local'. (Global tags are defined by .hgtags across all
662 662 # heads, and local tags are defined in .hg/localtags.)
663 663 # They constitute the in-memory cache of tags.
664 664 self.tags = self.tagtypes = None
665 665
666 666 self.nodetagscache = self.tagslist = None
667 667
668 668 cache = tagscache()
669 669 cache.tags, cache.tagtypes = self._findtags()
670 670
671 671 return cache
672 672
673 673 def tags(self):
674 674 '''return a mapping of tag to node'''
675 675 t = {}
676 676 if self.changelog.filteredrevs:
677 677 tags, tt = self._findtags()
678 678 else:
679 679 tags = self._tagscache.tags
680 680 for k, v in tags.iteritems():
681 681 try:
682 682 # ignore tags to unknown nodes
683 683 self.changelog.rev(v)
684 684 t[k] = v
685 685 except (error.LookupError, ValueError):
686 686 pass
687 687 return t
688 688
689 689 def _findtags(self):
690 690 '''Do the hard work of finding tags. Return a pair of dicts
691 691 (tags, tagtypes) where tags maps tag name to node, and tagtypes
692 692 maps tag name to a string like \'global\' or \'local\'.
693 693 Subclasses or extensions are free to add their own tags, but
694 694 should be aware that the returned dicts will be retained for the
695 695 duration of the localrepo object.'''
696 696
697 697 # XXX what tagtype should subclasses/extensions use? Currently
698 698 # mq and bookmarks add tags, but do not set the tagtype at all.
699 699 # Should each extension invent its own tag type? Should there
700 700 # be one tagtype for all such "virtual" tags? Or is the status
701 701 # quo fine?
702 702
703 703 alltags = {} # map tag name to (node, hist)
704 704 tagtypes = {}
705 705
706 706 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
707 707 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
708 708
709 709 # Build the return dicts. Have to re-encode tag names because
710 710 # the tags module always uses UTF-8 (in order not to lose info
711 711 # writing to the cache), but the rest of Mercurial wants them in
712 712 # local encoding.
713 713 tags = {}
714 714 for (name, (node, hist)) in alltags.iteritems():
715 715 if node != nullid:
716 716 tags[encoding.tolocal(name)] = node
717 717 tags['tip'] = self.changelog.tip()
718 718 tagtypes = dict([(encoding.tolocal(name), value)
719 719 for (name, value) in tagtypes.iteritems()])
720 720 return (tags, tagtypes)
721 721
722 722 def tagtype(self, tagname):
723 723 '''
724 724 return the type of the given tag. result can be:
725 725
726 726 'local' : a local tag
727 727 'global' : a global tag
728 728 None : tag does not exist
729 729 '''
730 730
731 731 return self._tagscache.tagtypes.get(tagname)
732 732
733 733 def tagslist(self):
734 734 '''return a list of tags ordered by revision'''
735 735 if not self._tagscache.tagslist:
736 736 l = []
737 737 for t, n in self.tags().iteritems():
738 738 l.append((self.changelog.rev(n), t, n))
739 739 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
740 740
741 741 return self._tagscache.tagslist
742 742
743 743 def nodetags(self, node):
744 744 '''return the tags associated with a node'''
745 745 if not self._tagscache.nodetagscache:
746 746 nodetagscache = {}
747 747 for t, n in self._tagscache.tags.iteritems():
748 748 nodetagscache.setdefault(n, []).append(t)
749 749 for tags in nodetagscache.itervalues():
750 750 tags.sort()
751 751 self._tagscache.nodetagscache = nodetagscache
752 752 return self._tagscache.nodetagscache.get(node, [])
753 753
754 754 def nodebookmarks(self, node):
755 755 marks = []
756 756 for bookmark, n in self._bookmarks.iteritems():
757 757 if n == node:
758 758 marks.append(bookmark)
759 759 return sorted(marks)
760 760
761 761 def branchmap(self):
762 762 '''returns a dictionary {branch: [branchheads]} with branchheads
763 763 ordered by increasing revision number'''
764 764 branchmap.updatecache(self)
765 765 return self._branchcaches[self.filtername]
766 766
767 767 @unfilteredmethod
768 768 def revbranchcache(self):
769 769 if not self._revbranchcache:
770 770 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
771 771 return self._revbranchcache
772 772
773 773 def branchtip(self, branch, ignoremissing=False):
774 774 '''return the tip node for a given branch
775 775
776 776 If ignoremissing is True, then this method will not raise an error.
777 777 This is helpful for callers that only expect None for a missing branch
778 778 (e.g. namespace).
779 779
780 780 '''
781 781 try:
782 782 return self.branchmap().branchtip(branch)
783 783 except KeyError:
784 784 if not ignoremissing:
785 785 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
786 786 else:
787 787 pass
788 788
789 789 def lookup(self, key):
790 790 return self[key].node()
791 791
792 792 def lookupbranch(self, key, remote=None):
793 793 repo = remote or self
794 794 if key in repo.branchmap():
795 795 return key
796 796
797 797 repo = (remote and remote.local()) and remote or self
798 798 return repo[key].branch()
799 799
800 800 def known(self, nodes):
801 801 nm = self.changelog.nodemap
802 802 pc = self._phasecache
803 803 result = []
804 804 for n in nodes:
805 805 r = nm.get(n)
806 806 resp = not (r is None or pc.phase(self, r) >= phases.secret)
807 807 result.append(resp)
808 808 return result
809 809
810 810 def local(self):
811 811 return self
812 812
813 813 def publishing(self):
814 814 # it's safe (and desirable) to trust the publish flag unconditionally
815 815 # so that we don't finalize changes shared between users via ssh or nfs
816 816 return self.ui.configbool('phases', 'publish', True, untrusted=True)
817 817
818 818 def cancopy(self):
819 819 # so statichttprepo's override of local() works
820 820 if not self.local():
821 821 return False
822 822 if not self.publishing():
823 823 return True
824 824 # if publishing we can't copy if there is filtered content
825 825 return not self.filtered('visible').changelog.filteredrevs
826 826
827 827 def shared(self):
828 828 '''the type of shared repository (None if not shared)'''
829 829 if self.sharedpath != self.path:
830 830 return 'store'
831 831 return None
832 832
833 833 def join(self, f, *insidef):
834 834 return self.vfs.join(os.path.join(f, *insidef))
835 835
836 836 def wjoin(self, f, *insidef):
837 837 return self.vfs.reljoin(self.root, f, *insidef)
838 838
839 839 def file(self, f):
840 840 if f[0] == '/':
841 841 f = f[1:]
842 842 return filelog.filelog(self.svfs, f)
843 843
844 844 def changectx(self, changeid):
845 845 return self[changeid]
846 846
847 847 def parents(self, changeid=None):
848 848 '''get list of changectxs for parents of changeid'''
849 849 return self[changeid].parents()
850 850
    def setparents(self, p1, p2=nullid):
        """Set the working-directory parents, fixing up copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # Dropping the second parent: discard copy records whose
                # source and destination are both unknown to the first parent.
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
867 867
868 868 def filectx(self, path, changeid=None, fileid=None):
869 869 """changeid can be a changeset revision, node, or tag.
870 870 fileid can be a file revision or node."""
871 871 return context.filectx(self, path, changeid, fileid)
872 872
873 873 def getcwd(self):
874 874 return self.dirstate.getcwd()
875 875
876 876 def pathto(self, f, cwd=None):
877 877 return self.dirstate.pathto(f, cwd)
878 878
879 879 def wfile(self, f, mode='r'):
880 880 return self.wvfs(f, mode)
881 881
882 882 def _link(self, f):
883 883 return self.wvfs.islink(f)
884 884
    def _loadfilter(self, filter):
        """load and memoize the filter patterns for config section ``filter``

        ``filter`` is 'encode' or 'decode'.  The result, cached in
        self.filterpats, is a list of (matcher, filterfn, params) tuples.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # command names a registered in-process data filter
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise run the command as an external filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
908 908
909 909 def _filter(self, filterpats, filename, data):
910 910 for mf, fn, cmd in filterpats:
911 911 if mf(filename):
912 912 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
913 913 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
914 914 break
915 915
916 916 return data
917 917
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters from the [encode] section, applied when reading data
        # from the working directory (see wread)
        return self._loadfilter('encode')
921 921
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters from the [decode] section, applied when writing data
        # to the working directory (see wwrite)
        return self._loadfilter('decode')
925 925
926 926 def adddatafilter(self, name, filter):
927 927 self._datafilters[name] = filter
928 928
929 929 def wread(self, filename):
930 930 if self._link(filename):
931 931 data = self.wvfs.readlink(filename)
932 932 else:
933 933 data = self.wvfs.read(filename)
934 934 return self._filter(self._encodefilterpats, filename, data)
935 935
936 936 def wwrite(self, filename, data, flags):
937 937 """write ``data`` into ``filename`` in the working directory
938 938
939 939 This returns length of written (maybe decoded) data.
940 940 """
941 941 data = self._filter(self._decodefilterpats, filename, data)
942 942 if 'l' in flags:
943 943 self.wvfs.symlink(data, filename)
944 944 else:
945 945 self.wvfs.write(filename, data)
946 946 if 'x' in flags:
947 947 self.wvfs.setflags(filename, False, True)
948 948 return len(data)
949 949
950 950 def wwritedata(self, filename, data):
951 951 return self._filter(self._decodefilterpats, filename, data)
952 952
953 953 def currenttransaction(self):
954 954 """return the current transaction or None if non exists"""
955 955 if self._transref:
956 956 tr = self._transref()
957 957 else:
958 958 tr = None
959 959
960 960 if tr and tr.running():
961 961 return tr
962 962 return None
963 963
    def transaction(self, desc, report=None):
        """open a new transaction named ``desc`` and return it

        If a transaction is already running, return a nested handle
        instead.  ``report``, when given, replaces ui.warn as the sink
        for journal error messages.  Raises RepoError when an abandoned
        journal from a previous run is found.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            # a transaction is already running: nest inside it
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write()

        # unique-ish transaction id passed to the pretxnopen/close hooks
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc, **tr.hookargs)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1033 1033
1034 1034 def _journalfiles(self):
1035 1035 return ((self.svfs, 'journal'),
1036 1036 (self.vfs, 'journal.dirstate'),
1037 1037 (self.vfs, 'journal.branch'),
1038 1038 (self.vfs, 'journal.desc'),
1039 1039 (self.vfs, 'journal.bookmarks'),
1040 1040 (self.svfs, 'journal.phaseroots'))
1041 1041
1042 1042 def undofiles(self):
1043 1043 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1044 1044
1045 1045 def _writejournal(self, desc):
1046 1046 self.vfs.write("journal.dirstate",
1047 1047 self.vfs.tryread("dirstate"))
1048 1048 self.vfs.write("journal.branch",
1049 1049 encoding.fromlocal(self.dirstate.branch()))
1050 1050 self.vfs.write("journal.desc",
1051 1051 "%d\n%s\n" % (len(self), desc))
1052 1052 self.vfs.write("journal.bookmarks",
1053 1053 self.vfs.tryread("bookmarks"))
1054 1054 self.svfs.write("journal.phaseroots",
1055 1055 self.svfs.tryread("phaseroots"))
1056 1056
1057 1057 def recover(self):
1058 1058 lock = self.lock()
1059 1059 try:
1060 1060 if self.svfs.exists("journal"):
1061 1061 self.ui.status(_("rolling back interrupted transaction\n"))
1062 1062 vfsmap = {'': self.svfs,
1063 1063 'plain': self.vfs,}
1064 1064 transaction.rollback(self.svfs, vfsmap, "journal",
1065 1065 self.ui.warn)
1066 1066 self.invalidate()
1067 1067 return True
1068 1068 else:
1069 1069 self.ui.warn(_("no interrupted transaction available\n"))
1070 1070 return False
1071 1071 finally:
1072 1072 lock.release()
1073 1073
1074 1074 def rollback(self, dryrun=False, force=False):
1075 1075 wlock = lock = None
1076 1076 try:
1077 1077 wlock = self.wlock()
1078 1078 lock = self.lock()
1079 1079 if self.svfs.exists("undo"):
1080 1080 return self._rollback(dryrun, force)
1081 1081 else:
1082 1082 self.ui.warn(_("no rollback information available\n"))
1083 1083 return 1
1084 1084 finally:
1085 1085 release(lock, wlock)
1086 1086
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """undo the last transaction recorded in .hg/undo*

        Aborts when rolling back a commit that is not the working
        directory parent unless ``force`` is set.  With ``dryrun`` only
        the status message is printed.  Returns 0 on success.
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<transaction desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: we don't know what is being rolled back
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # the rollback stripped a working directory parent: restore
            # the saved dirstate and named branch as well
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1156 1156
1157 1157 def invalidatecaches(self):
1158 1158
1159 1159 if '_tagscache' in vars(self):
1160 1160 # can't use delattr on proxy
1161 1161 del self.__dict__['_tagscache']
1162 1162
1163 1163 self.unfiltered()._branchcaches.clear()
1164 1164 self.invalidatevolatilesets()
1165 1165
    def invalidatevolatilesets(self):
        """Forget caches derived from volatile state: repoview filtered
        revisions and obsolescence markers."""
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1169 1169
1170 1170 def invalidatedirstate(self):
1171 1171 '''Invalidates the dirstate, causing the next call to dirstate
1172 1172 to check if it was modified since the last time it was read,
1173 1173 rereading it if it has.
1174 1174
1175 1175 This is different to dirstate.invalidate() that it doesn't always
1176 1176 rereads the dirstate. Use dirstate.invalidate() if you want to
1177 1177 explicitly read the dirstate again (i.e. restoring it to a previous
1178 1178 known good state).'''
1179 1179 if hasunfilteredcache(self, 'dirstate'):
1180 1180 for k in self.dirstate._filecache:
1181 1181 try:
1182 1182 delattr(self.dirstate, k)
1183 1183 except AttributeError:
1184 1184 pass
1185 1185 delattr(self.unfiltered(), 'dirstate')
1186 1186
1187 1187 def invalidate(self):
1188 1188 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1189 1189 for k in self._filecache:
1190 1190 # dirstate is invalidated separately in invalidatedirstate()
1191 1191 if k == 'dirstate':
1192 1192 continue
1193 1193
1194 1194 try:
1195 1195 delattr(unfiltered, k)
1196 1196 except AttributeError:
1197 1197 pass
1198 1198 self.invalidatecaches()
1199 1199 self.store.invalidatecaches()
1200 1200
1201 1201 def invalidateall(self):
1202 1202 '''Fully invalidates both store and non-store parts, causing the
1203 1203 subsequent operation to reread any outside changes.'''
1204 1204 # extension should hook this to invalidate its caches
1205 1205 self.invalidate()
1206 1206 self.invalidatedirstate()
1207 1207
1208 1208 def _refreshfilecachestats(self, tr):
1209 1209 """Reload stats of cached files so that they are flagged as valid"""
1210 1210 for k, ce in self._filecache.items():
1211 1211 if k == 'dirstate' or k not in self.__dict__:
1212 1212 continue
1213 1213 ce.refresh()
1214 1214
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              parentenvvar=None):
        """acquire ``lockname`` in ``vfs`` and return the lock object

        When the lock is held elsewhere and ``wait`` is False, the
        LockHeld error propagates; otherwise we warn and retry with a
        timeout (ui.timeout config, default 600 seconds).
        ``parentenvvar`` optionally names an environment variable whose
        value identifies a lock inherited from a parent process.
        """
        parentlock = None
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            # first attempt: do not wait (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1236 1236
1237 1237 def _afterlock(self, callback):
1238 1238 """add a callback to be run when the repository is fully unlocked
1239 1239
1240 1240 The callback will be executed when the outermost lock is released
1241 1241 (with wlock being higher level than 'lock')."""
1242 1242 for ref in (self._wlockref, self._lockref):
1243 1243 l = ref and ref()
1244 1244 if l and l.held:
1245 1245 l.postrelease.append(callback)
1246 1246 break
1247 1247 else: # no lock have been found.
1248 1248 callback()
1249 1249
1250 1250 def lock(self, wait=True):
1251 1251 '''Lock the repository store (.hg/store) and return a weak reference
1252 1252 to the lock. Use this before modifying the store (e.g. committing or
1253 1253 stripping). If you are opening a transaction, get a lock as well.)
1254 1254
1255 1255 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1256 1256 'wlock' first to avoid a dead-lock hazard.'''
1257 1257 l = self._lockref and self._lockref()
1258 1258 if l is not None and l.held:
1259 1259 l.lock()
1260 1260 return l
1261 1261
1262 1262 l = self._lock(self.svfs, "lock", wait, None,
1263 1263 self.invalidate, _('repository %s') % self.origroot)
1264 1264 self._lockref = weakref.ref(l)
1265 1265 return l
1266 1266
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the wlock we already hold
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release, flush pending dirstate changes (or discard them
            # when a parent change was begun but never completed), then
            # refresh the file cache stat so the data is seen as valid
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1301 1301
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for the committed revision (or the
        reused parent node when nothing changed) and, when the file did
        change, appends its name to ``changelist``.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the context already points at a stored filelog revision; if
            # it matches a parent we can reuse it without adding anything
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1387 1387
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit patterns naming a
            # directory can be validated below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1563 1563
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Returns the node of the new changelog revision.  With ``error``
        set, IOErrors on individual files are fatal instead of being
        treated as missing files.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # no data for the file: record it as removed
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file change: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1650 1650
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # flush pending phase movements before they would be lost
            self._phasecache.write()
1668 1668
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1702 1702
def walk(self, match, node=None):
    '''Recursively walk the directory tree or the given changeset,
    yielding every file accepted by the match function.
    '''
    ctx = self[node]
    return ctx.walk(match)
1710 1710
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    '''Convenience wrapper that delegates to node1.status(node2).'''
    ctx = self[node1]
    return ctx.status(node2, match, ignored, clean, unknown,
                      listsubrepos)
1717 1717
def heads(self, start=None):
    '''Return the repository heads, highest revision first.'''
    cl = self.changelog
    found = cl.heads(start)
    # present the output in rev-descending order
    return sorted(found, key=cl.rev, reverse=True)
1722 1722
def branchheads(self, branch=None, start=None, closed=False):
    '''Return a (possibly filtered) list of heads for the given branch.

    Heads come back in topological order, newest to oldest.
    If branch is None, the dirstate branch is used.
    If start is not None, only heads reachable from start are returned.
    If closed is True, heads marked as closed are included as well.
    '''
    if branch is None:
        branch = self[None].branch()
    bmap = self.branchmap()
    if branch not in bmap:
        return []
    # the cache returns heads ordered lowest to highest; flip that
    result = list(reversed(bmap.branchheads(branch, closed=closed)))
    if start is not None:
        # drop heads that cannot be reached from startrev
        reachable = set(self.changelog.nodesbetween([start], result)[2])
        result = [h for h in result if h in reachable]
    return result
1743 1743
def branches(self, nodes):
    '''For each node, walk first parents back to a branch point and
    collect (tip, node, p1, p2) tuples.'''
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for tip in nodes:
        node = tip
        while True:
            first, second = self.changelog.parents(node)
            # stop at a merge (second parent set) or at the root
            if second != nullid or first == nullid:
                result.append((tip, node, first, second))
                break
            node = first
    return result
1757 1757
def between(self, pairs):
    '''For each (top, bottom) pair, walk first parents from top towards
    bottom and sample nodes at exponentially growing distances
    (1, 2, 4, ...).'''
    result = []
    for top, bottom in pairs:
        node, sample, steps = top, [], 0
        nextsample = 1
        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if steps == nextsample:
                sample.append(node)
                nextsample *= 2
            node = parent
            steps += 1
        result.append(sample)
    return result
1776 1776
def checkpush(self, pushop):
    """Hook point for extra checks before pushing.

    Extensions can override this, or call it when they override the
    push command.  The base implementation does nothing.
    """
1783 1783
@unfilteredpropertycache
def prepushoutgoinghooks(self):
    """util.hooks of "(repo, remote, outgoing)" callables, invoked
    before changesets are pushed.
    """
    return util.hooks()
1790 1790
def clone(self, remote, heads=[], stream=None):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible

    The streaming-clone decision logic (capability negotiation and
    requirement checks) lives in streamclone.maybeperformstreamclone();
    this diff residue previously duplicated it inline.  Anything not
    obtained by streaming is then fetched with a regular pull.
    '''
    # May perform a streaming clone (a full copy of the store) when the
    # remote supports it; a no-op otherwise.
    streamclone.maybeperformstreamclone(self, remote, heads, stream)

    # internal config: ui.quietbookmarkmove
    quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
    try:
        self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
        ret = exchange.pull(self, remote, heads).cgresult
    finally:
        # always restore the caller's config, even if the pull fails
        self.ui.restoreconfig(quiet)
    return ret
1831 1807
def pushkey(self, namespace, key, old, new):
    '''Push a single pushkey value, running the prepushkey/pushkey
    hooks around the actual update.  Returns the pushkey.push result,
    or False when the prepushkey hook aborts.'''
    try:
        tr = self.currenttransaction()
        hookargs = {}
        if tr is not None:
            hookargs.update(tr.hookargs)
            # only materialize pending changes if the hook asks for them
            hookargs['pending'] = (
                lambda: tr.writepending() and self.root or "")
        hookargs['namespace'] = namespace
        hookargs['key'] = key
        hookargs['old'] = old
        hookargs['new'] = new
        self.hook('prepushkey', throw=True, **hookargs)
    except error.HookAbort as exc:
        self.ui.write_err(_("pushkey-abort: %s\n") % exc)
        if exc.hint:
            self.ui.write_err(_("(%s)\n") % exc.hint)
        return False
    self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
    ret = pushkey.push(self, namespace, key, old, new)

    def runhook():
        self.hook('pushkey', namespace=namespace, key=key, old=old,
                  new=new, ret=ret)
    # run the post-update hook once the lock is released
    self._afterlock(runhook)
    return ret
1857 1833
def listkeys(self, namespace):
    '''List pushkey values in ``namespace``, running the
    prelistkeys/listkeys hooks around the lookup.'''
    self.hook('prelistkeys', throw=True, namespace=namespace)
    self.ui.debug('listing keys for "%s"\n' % namespace)
    result = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=result)
    return result
1864 1840
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''Echo the arguments back; used to test argument passing over
    the wire.'''
    return ' '.join('%s' % arg for arg in (one, two, three, four, five))
1868 1844
def savecommitmessage(self, text):
    '''Persist ``text`` to last-message.txt via the repo vfs and return
    the repo-relative path of that file.'''
    msgfile = self.vfs('last-message.txt', 'wb')
    try:
        msgfile.write(text)
    finally:
        msgfile.close()
    # strip "<root>/" from the absolute file name
    return self.pathto(msgfile.name[len(self.root) + 1:])
1876 1852
# Returns a plain function (not a bound method) to avoid circular
# references, so destructors can run.
def aftertrans(files):
    '''Build a callback that performs the (vfs, src, dest) renames in
    ``files`` once a transaction finishes.'''
    pending = [tuple(entry) for entry in files]

    def a():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:
                # the journal file does not exist yet; nothing to do
                pass
    return a
1887 1863
def undoname(fn):
    '''Map a journal file name to the matching undo file name.'''
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is replaced
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1892 1868
def instance(ui, path, create):
    '''Instantiate a local repository for ``path`` (URL or filesystem
    path).'''
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
1895 1871
def islocal(path):
    # repositories handled by this module are always local
    return True
@@ -1,225 +1,251 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import time
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 branchmap,
15 15 error,
16 16 store,
17 17 util,
18 18 )
19 19
def maybeperformstreamclone(repo, remote, heads, stream):
    """Possibly perform a streaming clone from ``remote`` into ``repo``.

    ``stream`` may be True/truthy (stream when supported), falsy (do
    not stream), or None (stream only if the server prefers it).  A
    streaming clone is only attempted for full clones, i.e. when no
    ``heads`` are requested.
    """
    # All clients that can request uncompressed clones can read repo
    # formats supported by all servers that can serve them.  If the
    # revlog format changes, clients will have to check version and
    # format flags on the "stream" capability and use uncompressed
    # transfer only when compatible.
    if stream is None:
        # honor a server that explicitly prefers to stream (fast LANs)
        stream = remote.capable('stream-preferred')

    if not stream or heads:
        return

    if remote.capable('stream'):
        # the bare 'stream' capability means the remote revlog format
        # is revlogv1 only
        streamin(repo, remote, set(('revlogv1',)))
        return

    # otherwise 'streamreqs' carries the remote revlog format
    streamreqs = remote.capable('streamreqs')
    if streamreqs:
        reqs = set(streamreqs.split(','))
        # stream in (and adjust our requirements) only if we support
        # every remote requirement
        if not reqs - repo.supportedformats:
            streamin(repo, remote, reqs)
45
def allowservergeneration(ui):
    """Whether this server allows streaming clones to be generated."""
    # untrusted=True: respect the setting even from untrusted config
    return ui.configbool('server', 'uncompressed', True, untrusted=True)
23 49
# Kept as its own function so extensions can override which store
# files get streamed.
def _walkstreamfiles(repo):
    return repo.store.walk()
27 53
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    Generator of raw chunks constituting a streaming clone.

    The stream starts with a line of 2 space-delimited integers: the
    number of entries and the total byte size.  Then, for each file
    transferred, a line with the file name and integer size delimited
    by a null byte, followed by the raw file data; then the next file
    entry, or EOF.

    When used on the wire protocol, an additional success line is
    prepended to the stream; adding it is not this function's job.

    A repository lock is taken to capture a consistent view of the
    store, so LockError may be raised.
    """
    filelist = []
    bytecount = 0
    # take a consistent snapshot of the repo; lock during the scan
    lock = repo.lock()
    try:
        repo.ui.debug('scanning\n')
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                filelist.append((name, size))
                bytecount += size
    finally:
        lock.release()

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(filelist), bytecount))
    yield '%d %d\n' % (len(filelist), bytecount)

    svfs = repo.svfs
    oldaudit = svfs.mustaudit
    debugflag = repo.ui.debugflag
    # skip path auditing while streaming; restored in the finally below
    svfs.mustaudit = False

    try:
        for name, size in filelist:
            if debugflag:
                repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
            # the name is partially encoded over the wire for
            # backwards compatibility
            yield '%s\0%d\n' % (store.encodedir(name), size)
            if size <= 65536:
                # small file: one read is cheaper than chunked iteration
                fp = svfs(name)
                try:
                    data = fp.read(size)
                finally:
                    fp.close()
                yield data
            else:
                for chunk in util.filechunkiter(svfs(name), limit=size):
                    yield chunk
    finally:
        svfs.mustaudit = oldaudit
88 114
def consumev1(repo, fp):
    """Apply version 1 streaming clone data from file handle ``fp``.

    Consumes the output of "streamout" and applies it to ``repo``.
    As with "streamout", the status line added by the wire protocol
    is not handled here.
    """
    lock = repo.lock()
    try:
        repo.ui.status(_('streaming all changes\n'))
        header = fp.readline()
        try:
            total_files, total_bytes = map(int, header.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), header)
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()

        tr = repo.transaction(_('clone'))
        try:
            for i in xrange(total_files):
                # XXX filenames containing '\n' or '\r' are unsupported
                entry = fp.readline()
                try:
                    name, size = entry.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'),
                        entry)
                if repo.ui.debugflag:
                    repo.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = repo.svfs(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    repo.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            tr.close()
        finally:
            tr.release()

        # writing straight to files circumvented the in-memory caches
        repo.invalidate()

        elapsed = time.time() - start
        if elapsed <= 0:
            # avoid division by zero in the rate computation below
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
    finally:
        lock.release()
151 177
def streamin(repo, remote, remotereqs):
    '''Run a streaming clone against ``remote`` and apply the result.

    ``remotereqs`` is the set of requirements needed to read the
    streamed data.  Returns len(repo.heads()) + 1.'''
    # Save the remote branchmap up front; it will be used later to
    # speed up local branchcache creation.
    rbranchmap = None
    if remote.capable("branchmap"):
        rbranchmap = remote.branchmap()

    fp = remote.stream_out()
    statusline = fp.readline()
    try:
        resp = int(statusline)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), statusline)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))

    applyremotedata(repo, remotereqs, rbranchmap, fp)
    return len(repo.heads()) + 1
175 201
def applyremotedata(repo, remotereqs, remotebranchmap, fp):
    """Apply stream clone data to a repository.

    ``remotereqs`` is a set of requirements needed to handle the
    incoming data.  ``remotebranchmap`` is the result of a branchmap
    lookup on the remote; it may be None.  ``fp`` is a file object
    containing the raw stream data, suitable for consumev1().
    """
    lock = repo.lock()
    try:
        consumev1(repo, fp)

        # new requirements = old non-format requirements
        #                  + new format-related remote requirements
        #                    from the streamed-in repository
        repo.requirements = remotereqs | (
            repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if remotebranchmap:
            allheads = []
            closed = []
            for bheads in remotebranchmap.itervalues():
                allheads.extend(bheads)
                for head in bheads:
                    headrev = repo.changelog.rev(head)
                    branch, isclosed = repo.changelog.branchinfo(headrev)
                    if isclosed:
                        closed.append(head)

            if allheads:
                rtiprev = max(int(repo.changelog.rev(node))
                              for node in allheads)
                cache = branchmap.branchcache(remotebranchmap,
                                              repo[rtiprev].node(),
                                              rtiprev,
                                              closednodes=closed)
                # Try to stick the cache as low as possible in the
                # filter stack; filters above 'served' are unlikely to
                # be fetched from a clone.
                for candidate in ('base', 'immutable', 'served'):
                    rview = repo.filtered(candidate)
                    if cache.validfor(rview):
                        repo._branchcaches[candidate] = cache
                        cache.write(rview)
                        break
        repo.invalidate()
    finally:
        lock.release()
General Comments 0
You need to be logged in to leave comments. Login now