streamclone: move applystreamclone() from localrepo.py...
Gregory Szorc
r26441:56527b88 default
@@ -0,0 +1,64 @@
1 # streamclone.py - producing and consuming streaming repository data
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 from . import (
11 branchmap,
12 exchange,
13 )
14
15 def applyremotedata(repo, remotereqs, remotebranchmap, fp):
16 """Apply stream clone data to a repository.
17
18 "remotereqs" is a set of requirements to handle the incoming data.
19 "remotebranchmap" is the result of a branchmap lookup on the remote. It
20 can be None.
21 "fp" is a file object containing the raw stream data, suitable for
22 feeding into exchange.consumestreamclone.
23 """
24 lock = repo.lock()
25 try:
26 exchange.consumestreamclone(repo, fp)
27
28 # new requirements = old non-format requirements +
29 #                    new format-related remote requirements
30 #                    (i.e. the requirements of the streamed-in repository)
31 repo.requirements = remotereqs | (
32 repo.requirements - repo.supportedformats)
33 repo._applyopenerreqs()
34 repo._writerequirements()
35
36 if remotebranchmap:
37 rbheads = []
38 closed = []
39 for bheads in remotebranchmap.itervalues():
40 rbheads.extend(bheads)
41 for h in bheads:
42 r = repo.changelog.rev(h)
43 b, c = repo.changelog.branchinfo(r)
44 if c:
45 closed.append(h)
46
47 if rbheads:
48 rtiprev = max((int(repo.changelog.rev(node))
49 for node in rbheads))
50 cache = branchmap.branchcache(remotebranchmap,
51 repo[rtiprev].node(),
52 rtiprev,
53 closednodes=closed)
54 # Try to stick it as low as possible
55 # filters above 'served' are unlikely to be fetched from a clone
56 for candidate in ('base', 'immutable', 'served'):
57 rview = repo.filtered(candidate)
58 if cache.validfor(rview):
59 repo._branchcaches[candidate] = cache
60 cache.write(rview)
61 break
62 repo.invalidate()
63 finally:
64 lock.release()
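For illustration, a minimal caller sketch for the new entry point (not part of this commit; 'path', 'remotereqs', 'remotebranchmap' and 'fp' are hypothetical stand-ins for data a stream-clone peer would provide):

from mercurial import hg, streamclone, ui as uimod

def apply_stream(path, remotereqs, remotebranchmap, fp):
    # applyremotedata() takes the repository lock itself and handles
    # requirement rewriting, branch-cache priming and cache invalidation.
    repo = hg.repository(uimod.ui(), path)
    streamclone.applyremotedata(repo, remotereqs, remotebranchmap, fp)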
@@ -1,1971 +1,1921 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 import streamclone
22 23 propertycache = util.propertycache
23 24 filecache = scmutil.filecache
24 25
25 26 class repofilecache(filecache):
26 27 """All filecache usage on repo are done for logic that should be unfiltered
27 28 """
28 29
29 30 def __get__(self, repo, type=None):
30 31 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 32 def __set__(self, repo, value):
32 33 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 34 def __delete__(self, repo):
34 35 return super(repofilecache, self).__delete__(repo.unfiltered())
35 36
36 37 class storecache(repofilecache):
37 38 """filecache for files in the store"""
38 39 def join(self, obj, fname):
39 40 return obj.sjoin(fname)
40 41
41 42 class unfilteredpropertycache(propertycache):
42 43 """propertycache that apply to unfiltered repo only"""
43 44
44 45 def __get__(self, repo, type=None):
45 46 unfi = repo.unfiltered()
46 47 if unfi is repo:
47 48 return super(unfilteredpropertycache, self).__get__(unfi)
48 49 return getattr(unfi, self.name)
49 50
50 51 class filteredpropertycache(propertycache):
51 52 """propertycache that must take filtering in account"""
52 53
53 54 def cachevalue(self, obj, value):
54 55 object.__setattr__(obj, self.name, value)
55 56
56 57
57 58 def hasunfilteredcache(repo, name):
58 59 """check if a repo has an unfilteredpropertycache value for <name>"""
59 60 return name in vars(repo.unfiltered())
60 61
61 62 def unfilteredmethod(orig):
62 63 """decorate method that always need to be run on unfiltered version"""
63 64 def wrapper(repo, *args, **kwargs):
64 65 return orig(repo.unfiltered(), *args, **kwargs)
65 66 return wrapper
66 67
67 68 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 69 'unbundle'))
69 70 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 71
71 72 class localpeer(peer.peerrepository):
72 73 '''peer for a local repo; reflects only the most recent API'''
73 74
74 75 def __init__(self, repo, caps=moderncaps):
75 76 peer.peerrepository.__init__(self)
76 77 self._repo = repo.filtered('served')
77 78 self.ui = repo.ui
78 79 self._caps = repo._restrictcapabilities(caps)
79 80 self.requirements = repo.requirements
80 81 self.supportedformats = repo.supportedformats
81 82
82 83 def close(self):
83 84 self._repo.close()
84 85
85 86 def _capabilities(self):
86 87 return self._caps
87 88
88 89 def local(self):
89 90 return self._repo
90 91
91 92 def canpush(self):
92 93 return True
93 94
94 95 def url(self):
95 96 return self._repo.url()
96 97
97 98 def lookup(self, key):
98 99 return self._repo.lookup(key)
99 100
100 101 def branchmap(self):
101 102 return self._repo.branchmap()
102 103
103 104 def heads(self):
104 105 return self._repo.heads()
105 106
106 107 def known(self, nodes):
107 108 return self._repo.known(nodes)
108 109
109 110 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 111 **kwargs):
111 112 cg = exchange.getbundle(self._repo, source, heads=heads,
112 113 common=common, bundlecaps=bundlecaps, **kwargs)
113 114 if bundlecaps is not None and 'HG20' in bundlecaps:
114 115 # When requesting a bundle2, getbundle returns a stream to make the
115 116 # wire level function happier. We need to build a proper object
116 117 # from it in local peer.
117 118 cg = bundle2.getunbundler(self.ui, cg)
118 119 return cg
119 120
120 121 # TODO We might want to move the next two calls into legacypeer and add
121 122 # unbundle instead.
122 123
123 124 def unbundle(self, cg, heads, url):
124 125 """apply a bundle on a repo
125 126
126 127 This function handles the repo locking itself."""
127 128 try:
128 129 try:
129 130 cg = exchange.readbundle(self.ui, cg, None)
130 131 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 132 if util.safehasattr(ret, 'getchunks'):
132 133 # This is a bundle20 object, turn it into an unbundler.
133 134 # This little dance should be dropped eventually when the
134 135 # API is finally improved.
135 136 stream = util.chunkbuffer(ret.getchunks())
136 137 ret = bundle2.getunbundler(self.ui, stream)
137 138 return ret
138 139 except Exception as exc:
139 140 # If the exception contains output salvaged from a bundle2
140 141 # reply, we need to make sure it is printed before continuing
141 142 # to fail. So we build a bundle2 with such output and consume
142 143 # it directly.
143 144 #
144 145 # This is not very elegant but allows a "simple" solution for
145 146 # issue4594
146 147 output = getattr(exc, '_bundle2salvagedoutput', ())
147 148 if output:
148 149 bundler = bundle2.bundle20(self._repo.ui)
149 150 for out in output:
150 151 bundler.addpart(out)
151 152 stream = util.chunkbuffer(bundler.getchunks())
152 153 b = bundle2.getunbundler(self.ui, stream)
153 154 bundle2.processbundle(self._repo, b)
154 155 raise
155 156 except error.PushRaced as exc:
156 157 raise error.ResponseError(_('push failed:'), str(exc))
157 158
158 159 def lock(self):
159 160 return self._repo.lock()
160 161
161 162 def addchangegroup(self, cg, source, url):
162 163 return changegroup.addchangegroup(self._repo, cg, source, url)
163 164
164 165 def pushkey(self, namespace, key, old, new):
165 166 return self._repo.pushkey(namespace, key, old, new)
166 167
167 168 def listkeys(self, namespace):
168 169 return self._repo.listkeys(namespace)
169 170
170 171 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 172 '''used to test argument passing over the wire'''
172 173 return "%s %s %s %s %s" % (one, two, three, four, five)
173 174
174 175 class locallegacypeer(localpeer):
175 176 '''peer extension which implements legacy methods too; used for tests with
176 177 restricted capabilities'''
177 178
178 179 def __init__(self, repo):
179 180 localpeer.__init__(self, repo, caps=legacycaps)
180 181
181 182 def branches(self, nodes):
182 183 return self._repo.branches(nodes)
183 184
184 185 def between(self, pairs):
185 186 return self._repo.between(pairs)
186 187
187 188 def changegroup(self, basenodes, source):
188 189 return changegroup.changegroup(self._repo, basenodes, source)
189 190
190 191 def changegroupsubset(self, bases, heads, source):
191 192 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 193
193 194 class localrepository(object):
194 195
195 196 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 197 'manifestv2'))
197 198 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 199 'dotencode'))
199 200 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 201 filtername = None
201 202
202 203 # a list of (ui, featureset) functions.
203 204 # only functions defined in modules of enabled extensions are invoked
204 205 featuresetupfuncs = set()
205 206
206 207 def _baserequirements(self, create):
207 208 return ['revlogv1']
208 209
209 210 def __init__(self, baseui, path=None, create=False):
210 211 self.requirements = set()
211 212 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 213 self.wopener = self.wvfs
213 214 self.root = self.wvfs.base
214 215 self.path = self.wvfs.join(".hg")
215 216 self.origroot = path
216 217 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 218 self.vfs = scmutil.vfs(self.path)
218 219 self.opener = self.vfs
219 220 self.baseui = baseui
220 221 self.ui = baseui.copy()
221 222 self.ui.copy = baseui.copy # prevent copying repo configuration
222 223 # A list of callbacks to shape the phase if no data were found.
223 224 # Callbacks are in the form: func(repo, roots) --> processed root.
224 225 # This list is to be filled by extensions during repo setup
225 226 self._phasedefaults = []
226 227 try:
227 228 self.ui.readconfig(self.join("hgrc"), self.root)
228 229 extensions.loadall(self.ui)
229 230 except IOError:
230 231 pass
231 232
232 233 if self.featuresetupfuncs:
233 234 self.supported = set(self._basesupported) # use private copy
234 235 extmods = set(m.__name__ for n, m
235 236 in extensions.extensions(self.ui))
236 237 for setupfunc in self.featuresetupfuncs:
237 238 if setupfunc.__module__ in extmods:
238 239 setupfunc(self.ui, self.supported)
239 240 else:
240 241 self.supported = self._basesupported
241 242
242 243 if not self.vfs.isdir():
243 244 if create:
244 245 if not self.wvfs.exists():
245 246 self.wvfs.makedirs()
246 247 self.vfs.makedir(notindexed=True)
247 248 self.requirements.update(self._baserequirements(create))
248 249 if self.ui.configbool('format', 'usestore', True):
249 250 self.vfs.mkdir("store")
250 251 self.requirements.add("store")
251 252 if self.ui.configbool('format', 'usefncache', True):
252 253 self.requirements.add("fncache")
253 254 if self.ui.configbool('format', 'dotencode', True):
254 255 self.requirements.add('dotencode')
255 256 # create an invalid changelog
256 257 self.vfs.append(
257 258 "00changelog.i",
258 259 '\0\0\0\2' # represents revlogv2
259 260 ' dummy changelog to prevent using the old repo layout'
260 261 )
261 262 # experimental config: format.generaldelta
262 263 if self.ui.configbool('format', 'generaldelta', False):
263 264 self.requirements.add("generaldelta")
264 265 if self.ui.configbool('experimental', 'treemanifest', False):
265 266 self.requirements.add("treemanifest")
266 267 if self.ui.configbool('experimental', 'manifestv2', False):
267 268 self.requirements.add("manifestv2")
268 269 else:
269 270 raise error.RepoError(_("repository %s not found") % path)
270 271 elif create:
271 272 raise error.RepoError(_("repository %s already exists") % path)
272 273 else:
273 274 try:
274 275 self.requirements = scmutil.readrequires(
275 276 self.vfs, self.supported)
276 277 except IOError as inst:
277 278 if inst.errno != errno.ENOENT:
278 279 raise
279 280
280 281 self.sharedpath = self.path
281 282 try:
282 283 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
283 284 realpath=True)
284 285 s = vfs.base
285 286 if not vfs.exists():
286 287 raise error.RepoError(
287 288 _('.hg/sharedpath points to nonexistent directory %s') % s)
288 289 self.sharedpath = s
289 290 except IOError as inst:
290 291 if inst.errno != errno.ENOENT:
291 292 raise
292 293
293 294 self.store = store.store(
294 295 self.requirements, self.sharedpath, scmutil.vfs)
295 296 self.spath = self.store.path
296 297 self.svfs = self.store.vfs
297 298 self.sjoin = self.store.join
298 299 self.vfs.createmode = self.store.createmode
299 300 self._applyopenerreqs()
300 301 if create:
301 302 self._writerequirements()
302 303
303 304 self._dirstatevalidatewarned = False
304 305
305 306 self._branchcaches = {}
306 307 self._revbranchcache = None
307 308 self.filterpats = {}
308 309 self._datafilters = {}
309 310 self._transref = self._lockref = self._wlockref = None
310 311
311 312 # A cache for various files under .hg/ that tracks file changes,
312 313 # (used by the filecache decorator)
313 314 #
314 315 # Maps a property name to its util.filecacheentry
315 316 self._filecache = {}
316 317
317 318 # hold sets of revision to be filtered
318 319 # should be cleared when something might have changed the filter value:
319 320 # - new changesets,
320 321 # - phase change,
321 322 # - new obsolescence marker,
322 323 # - working directory parent change,
323 324 # - bookmark changes
324 325 self.filteredrevcache = {}
325 326
326 327 # generic mapping between names and nodes
327 328 self.names = namespaces.namespaces()
328 329
329 330 def close(self):
330 331 self._writecaches()
331 332
332 333 def _writecaches(self):
333 334 if self._revbranchcache:
334 335 self._revbranchcache.write()
335 336
336 337 def _restrictcapabilities(self, caps):
337 338 if self.ui.configbool('experimental', 'bundle2-advertise', True):
338 339 caps = set(caps)
339 340 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
340 341 caps.add('bundle2=' + urllib.quote(capsblob))
341 342 return caps
342 343
343 344 def _applyopenerreqs(self):
344 345 self.svfs.options = dict((r, 1) for r in self.requirements
345 346 if r in self.openerreqs)
346 347 # experimental config: format.chunkcachesize
347 348 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
348 349 if chunkcachesize is not None:
349 350 self.svfs.options['chunkcachesize'] = chunkcachesize
350 351 # experimental config: format.maxchainlen
351 352 maxchainlen = self.ui.configint('format', 'maxchainlen')
352 353 if maxchainlen is not None:
353 354 self.svfs.options['maxchainlen'] = maxchainlen
354 355 # experimental config: format.manifestcachesize
355 356 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
356 357 if manifestcachesize is not None:
357 358 self.svfs.options['manifestcachesize'] = manifestcachesize
358 359 # experimental config: format.aggressivemergedeltas
359 360 aggressivemergedeltas = self.ui.configbool('format',
360 361 'aggressivemergedeltas', False)
361 362 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
362 363
363 364 def _writerequirements(self):
364 365 scmutil.writerequires(self.vfs, self.requirements)
365 366
366 367 def _checknested(self, path):
367 368 """Determine if path is a legal nested repository."""
368 369 if not path.startswith(self.root):
369 370 return False
370 371 subpath = path[len(self.root) + 1:]
371 372 normsubpath = util.pconvert(subpath)
372 373
373 374 # XXX: Checking against the current working copy is wrong in
374 375 # the sense that it can reject things like
375 376 #
376 377 # $ hg cat -r 10 sub/x.txt
377 378 #
378 379 # if sub/ is no longer a subrepository in the working copy
379 380 # parent revision.
380 381 #
381 382 # However, it can of course also allow things that would have
382 383 # been rejected before, such as the above cat command if sub/
383 384 # is a subrepository now, but was a normal directory before.
384 385 # The old path auditor would have rejected by mistake since it
385 386 # panics when it sees sub/.hg/.
386 387 #
387 388 # All in all, checking against the working copy seems sensible
388 389 # since we want to prevent access to nested repositories on
389 390 # the filesystem *now*.
390 391 ctx = self[None]
391 392 parts = util.splitpath(subpath)
392 393 while parts:
393 394 prefix = '/'.join(parts)
394 395 if prefix in ctx.substate:
395 396 if prefix == normsubpath:
396 397 return True
397 398 else:
398 399 sub = ctx.sub(prefix)
399 400 return sub.checknested(subpath[len(prefix) + 1:])
400 401 else:
401 402 parts.pop()
402 403 return False
403 404
404 405 def peer(self):
405 406 return localpeer(self) # not cached to avoid reference cycle
406 407
407 408 def unfiltered(self):
408 409 """Return unfiltered version of the repository
409 410
410 411 Intended to be overwritten by filtered repo."""
411 412 return self
412 413
413 414 def filtered(self, name):
414 415 """Return a filtered version of a repository"""
415 416 # build a new class with the mixin and the current class
416 417 # (possibly subclass of the repo)
417 418 class proxycls(repoview.repoview, self.unfiltered().__class__):
418 419 pass
419 420 return proxycls(self, name)
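The accepted filter names come from repoview ('visible', 'served', 'immutable', 'base'); a quick sketch, assuming repo is an unfiltered localrepository:

served = repo.filtered('served')    # hides secret and hidden changesets
visible = repo.filtered('visible')  # hides only obsolescence-hidden ones
assert visible.unfiltered() is repo # every view proxies the same repo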
420 421
421 422 @repofilecache('bookmarks')
422 423 def _bookmarks(self):
423 424 return bookmarks.bmstore(self)
424 425
425 426 @repofilecache('bookmarks.current')
426 427 def _activebookmark(self):
427 428 return bookmarks.readactive(self)
428 429
429 430 def bookmarkheads(self, bookmark):
430 431 name = bookmark.split('@', 1)[0]
431 432 heads = []
432 433 for mark, n in self._bookmarks.iteritems():
433 434 if mark.split('@', 1)[0] == name:
434 435 heads.append(n)
435 436 return heads
436 437
437 438 # _phaserevs and _phasesets depend on changelog. what we need is to
438 439 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
439 440 # can't be easily expressed in filecache mechanism.
440 441 @storecache('phaseroots', '00changelog.i')
441 442 def _phasecache(self):
442 443 return phases.phasecache(self, self._phasedefaults)
443 444
444 445 @storecache('obsstore')
445 446 def obsstore(self):
446 447 # read default format for new obsstore.
447 448 # developer config: format.obsstore-version
448 449 defaultformat = self.ui.configint('format', 'obsstore-version', None)
449 450 # rely on obsstore class default when possible.
450 451 kwargs = {}
451 452 if defaultformat is not None:
452 453 kwargs['defaultformat'] = defaultformat
453 454 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
454 455 store = obsolete.obsstore(self.svfs, readonly=readonly,
455 456 **kwargs)
456 457 if store and readonly:
457 458 self.ui.warn(
458 459 _('obsolete feature not enabled but %i markers found!\n')
459 460 % len(list(store)))
460 461 return store
461 462
462 463 @storecache('00changelog.i')
463 464 def changelog(self):
464 465 c = changelog.changelog(self.svfs)
465 466 if 'HG_PENDING' in os.environ:
466 467 p = os.environ['HG_PENDING']
467 468 if p.startswith(self.root):
468 469 c.readpending('00changelog.i.a')
469 470 return c
470 471
471 472 @storecache('00manifest.i')
472 473 def manifest(self):
473 474 return manifest.manifest(self.svfs)
474 475
475 476 def dirlog(self, dir):
476 477 return self.manifest.dirlog(dir)
477 478
478 479 @repofilecache('dirstate')
479 480 def dirstate(self):
480 481 return dirstate.dirstate(self.vfs, self.ui, self.root,
481 482 self._dirstatevalidate)
482 483
483 484 def _dirstatevalidate(self, node):
484 485 try:
485 486 self.changelog.rev(node)
486 487 return node
487 488 except error.LookupError:
488 489 if not self._dirstatevalidatewarned:
489 490 self._dirstatevalidatewarned = True
490 491 self.ui.warn(_("warning: ignoring unknown"
491 492 " working parent %s!\n") % short(node))
492 493 return nullid
493 494
494 495 def __getitem__(self, changeid):
495 496 if changeid is None or changeid == wdirrev:
496 497 return context.workingctx(self)
497 498 if isinstance(changeid, slice):
498 499 return [context.changectx(self, i)
499 500 for i in xrange(*changeid.indices(len(self)))
500 501 if i not in self.changelog.filteredrevs]
501 502 return context.changectx(self, changeid)
502 503
503 504 def __contains__(self, changeid):
504 505 try:
505 506 self[changeid]
506 507 return True
507 508 except error.RepoLookupError:
508 509 return False
509 510
510 511 def __nonzero__(self):
511 512 return True
512 513
513 514 def __len__(self):
514 515 return len(self.changelog)
515 516
516 517 def __iter__(self):
517 518 return iter(self.changelog)
518 519
519 520 def revs(self, expr, *args):
520 521 '''Return a list of revisions matching the given revset'''
521 522 expr = revset.formatspec(expr, *args)
522 523 m = revset.match(None, expr)
523 524 return m(self)
524 525
525 526 def set(self, expr, *args):
526 527 '''
527 528 Yield a context for each matching revision, after doing arg
528 529 replacement via revset.formatspec
529 530 '''
530 531 for r in self.revs(expr, *args):
531 532 yield self[r]
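A short usage sketch for these two helpers (hypothetical revsets; assumes an open repo object):

for rev in repo.revs('branch(%s) and not merge()', 'default'):
    print rev          # revision numbers as ints
for ctx in repo.set('heads(all())'):
    print ctx.hex()    # one changectx per matching revision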
532 533
533 534 def url(self):
534 535 return 'file:' + self.root
535 536
536 537 def hook(self, name, throw=False, **args):
537 538 """Call a hook, passing this repo instance.
538 539
539 540 This a convenience method to aid invoking hooks. Extensions likely
540 541 won't call this unless they have registered a custom hook or are
541 542 replacing code that is expected to call a hook.
542 543 """
543 544 return hook.hook(self.ui, self, name, throw, **args)
544 545
545 546 @unfilteredmethod
546 547 def _tag(self, names, node, message, local, user, date, extra=None,
547 548 editor=False):
548 549 if isinstance(names, str):
549 550 names = (names,)
550 551
551 552 branches = self.branchmap()
552 553 for name in names:
553 554 self.hook('pretag', throw=True, node=hex(node), tag=name,
554 555 local=local)
555 556 if name in branches:
556 557 self.ui.warn(_("warning: tag %s conflicts with existing"
557 558 " branch name\n") % name)
558 559
559 560 def writetags(fp, names, munge, prevtags):
560 561 fp.seek(0, 2)
561 562 if prevtags and prevtags[-1] != '\n':
562 563 fp.write('\n')
563 564 for name in names:
564 565 if munge:
565 566 m = munge(name)
566 567 else:
567 568 m = name
568 569
569 570 if (self._tagscache.tagtypes and
570 571 name in self._tagscache.tagtypes):
571 572 old = self.tags().get(name, nullid)
572 573 fp.write('%s %s\n' % (hex(old), m))
573 574 fp.write('%s %s\n' % (hex(node), m))
574 575 fp.close()
575 576
576 577 prevtags = ''
577 578 if local:
578 579 try:
579 580 fp = self.vfs('localtags', 'r+')
580 581 except IOError:
581 582 fp = self.vfs('localtags', 'a')
582 583 else:
583 584 prevtags = fp.read()
584 585
585 586 # local tags are stored in the current charset
586 587 writetags(fp, names, None, prevtags)
587 588 for name in names:
588 589 self.hook('tag', node=hex(node), tag=name, local=local)
589 590 return
590 591
591 592 try:
592 593 fp = self.wfile('.hgtags', 'rb+')
593 594 except IOError as e:
594 595 if e.errno != errno.ENOENT:
595 596 raise
596 597 fp = self.wfile('.hgtags', 'ab')
597 598 else:
598 599 prevtags = fp.read()
599 600
600 601 # committed tags are stored in UTF-8
601 602 writetags(fp, names, encoding.fromlocal, prevtags)
602 603
603 604 fp.close()
604 605
605 606 self.invalidatecaches()
606 607
607 608 if '.hgtags' not in self.dirstate:
608 609 self[None].add(['.hgtags'])
609 610
610 611 m = matchmod.exact(self.root, '', ['.hgtags'])
611 612 tagnode = self.commit(message, user, date, extra=extra, match=m,
612 613 editor=editor)
613 614
614 615 for name in names:
615 616 self.hook('tag', node=hex(node), tag=name, local=local)
616 617
617 618 return tagnode
618 619
619 620 def tag(self, names, node, message, local, user, date, editor=False):
620 621 '''tag a revision with one or more symbolic names.
621 622
622 623 names is a list of strings or, when adding a single tag, names may be a
623 624 string.
624 625
625 626 if local is True, the tags are stored in a per-repository file.
626 627 otherwise, they are stored in the .hgtags file, and a new
627 628 changeset is committed with the change.
628 629
629 630 keyword arguments:
630 631
631 632 local: whether to store tags in non-version-controlled file
632 633 (default False)
633 634
634 635 message: commit message to use if committing
635 636
636 637 user: name of user to use if committing
637 638
638 639 date: date tuple to use if committing'''
639 640
640 641 if not local:
641 642 m = matchmod.exact(self.root, '', ['.hgtags'])
642 643 if any(self.status(match=m, unknown=True, ignored=True)):
643 644 raise util.Abort(_('working copy of .hgtags is changed'),
644 645 hint=_('please commit .hgtags manually'))
645 646
646 647 self.tags() # instantiate the cache
647 648 self._tag(names, node, message, local, user, date, editor=editor)
648 649
649 650 @filteredpropertycache
650 651 def _tagscache(self):
651 652 '''Returns a tagscache object that contains various tags related
652 653 caches.'''
653 654
654 655 # This simplifies its cache management by having one decorated
655 656 # function (this one) and the rest simply fetch things from it.
656 657 class tagscache(object):
657 658 def __init__(self):
658 659 # These two define the set of tags for this repository. tags
659 660 # maps tag name to node; tagtypes maps tag name to 'global' or
660 661 # 'local'. (Global tags are defined by .hgtags across all
661 662 # heads, and local tags are defined in .hg/localtags.)
662 663 # They constitute the in-memory cache of tags.
663 664 self.tags = self.tagtypes = None
664 665
665 666 self.nodetagscache = self.tagslist = None
666 667
667 668 cache = tagscache()
668 669 cache.tags, cache.tagtypes = self._findtags()
669 670
670 671 return cache
671 672
672 673 def tags(self):
673 674 '''return a mapping of tag to node'''
674 675 t = {}
675 676 if self.changelog.filteredrevs:
676 677 tags, tt = self._findtags()
677 678 else:
678 679 tags = self._tagscache.tags
679 680 for k, v in tags.iteritems():
680 681 try:
681 682 # ignore tags to unknown nodes
682 683 self.changelog.rev(v)
683 684 t[k] = v
684 685 except (error.LookupError, ValueError):
685 686 pass
686 687 return t
687 688
688 689 def _findtags(self):
689 690 '''Do the hard work of finding tags. Return a pair of dicts
690 691 (tags, tagtypes) where tags maps tag name to node, and tagtypes
691 692 maps tag name to a string like \'global\' or \'local\'.
692 693 Subclasses or extensions are free to add their own tags, but
693 694 should be aware that the returned dicts will be retained for the
694 695 duration of the localrepo object.'''
695 696
696 697 # XXX what tagtype should subclasses/extensions use? Currently
697 698 # mq and bookmarks add tags, but do not set the tagtype at all.
698 699 # Should each extension invent its own tag type? Should there
699 700 # be one tagtype for all such "virtual" tags? Or is the status
700 701 # quo fine?
701 702
702 703 alltags = {} # map tag name to (node, hist)
703 704 tagtypes = {}
704 705
705 706 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
706 707 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
707 708
708 709 # Build the return dicts. Have to re-encode tag names because
709 710 # the tags module always uses UTF-8 (in order not to lose info
710 711 # writing to the cache), but the rest of Mercurial wants them in
711 712 # local encoding.
712 713 tags = {}
713 714 for (name, (node, hist)) in alltags.iteritems():
714 715 if node != nullid:
715 716 tags[encoding.tolocal(name)] = node
716 717 tags['tip'] = self.changelog.tip()
717 718 tagtypes = dict([(encoding.tolocal(name), value)
718 719 for (name, value) in tagtypes.iteritems()])
719 720 return (tags, tagtypes)
720 721
721 722 def tagtype(self, tagname):
722 723 '''
723 724 return the type of the given tag. result can be:
724 725
725 726 'local' : a local tag
726 727 'global' : a global tag
727 728 None : tag does not exist
728 729 '''
729 730
730 731 return self._tagscache.tagtypes.get(tagname)
731 732
732 733 def tagslist(self):
733 734 '''return a list of tags ordered by revision'''
734 735 if not self._tagscache.tagslist:
735 736 l = []
736 737 for t, n in self.tags().iteritems():
737 738 l.append((self.changelog.rev(n), t, n))
738 739 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
739 740
740 741 return self._tagscache.tagslist
741 742
742 743 def nodetags(self, node):
743 744 '''return the tags associated with a node'''
744 745 if not self._tagscache.nodetagscache:
745 746 nodetagscache = {}
746 747 for t, n in self._tagscache.tags.iteritems():
747 748 nodetagscache.setdefault(n, []).append(t)
748 749 for tags in nodetagscache.itervalues():
749 750 tags.sort()
750 751 self._tagscache.nodetagscache = nodetagscache
751 752 return self._tagscache.nodetagscache.get(node, [])
752 753
753 754 def nodebookmarks(self, node):
754 755 marks = []
755 756 for bookmark, n in self._bookmarks.iteritems():
756 757 if n == node:
757 758 marks.append(bookmark)
758 759 return sorted(marks)
759 760
760 761 def branchmap(self):
761 762 '''returns a dictionary {branch: [branchheads]} with branchheads
762 763 ordered by increasing revision number'''
763 764 branchmap.updatecache(self)
764 765 return self._branchcaches[self.filtername]
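Consuming the returned structure looks roughly like this (a sketch; hex comes from mercurial.node):

from mercurial.node import hex

for branch, heads in repo.branchmap().iteritems():
    # values are lists of binary head nodes, ascending by revision
    print branch, [hex(n)[:12] for n in heads]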
765 766
766 767 @unfilteredmethod
767 768 def revbranchcache(self):
768 769 if not self._revbranchcache:
769 770 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
770 771 return self._revbranchcache
771 772
772 773 def branchtip(self, branch, ignoremissing=False):
773 774 '''return the tip node for a given branch
774 775
775 776 If ignoremissing is True, then this method will not raise an error.
776 777 This is helpful for callers that only expect None for a missing branch
777 778 (e.g. namespace).
778 779
779 780 '''
780 781 try:
781 782 return self.branchmap().branchtip(branch)
782 783 except KeyError:
783 784 if not ignoremissing:
784 785 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
785 786 else:
786 787 pass
787 788
788 789 def lookup(self, key):
789 790 return self[key].node()
790 791
791 792 def lookupbranch(self, key, remote=None):
792 793 repo = remote or self
793 794 if key in repo.branchmap():
794 795 return key
795 796
796 797 repo = (remote and remote.local()) and remote or self
797 798 return repo[key].branch()
798 799
799 800 def known(self, nodes):
800 801 nm = self.changelog.nodemap
801 802 pc = self._phasecache
802 803 result = []
803 804 for n in nodes:
804 805 r = nm.get(n)
805 806 resp = not (r is None or pc.phase(self, r) >= phases.secret)
806 807 result.append(resp)
807 808 return result
808 809
809 810 def local(self):
810 811 return self
811 812
812 813 def publishing(self):
813 814 # it's safe (and desirable) to trust the publish flag unconditionally
814 815 # so that we don't finalize changes shared between users via ssh or nfs
815 816 return self.ui.configbool('phases', 'publish', True, untrusted=True)
816 817
817 818 def cancopy(self):
818 819 # so statichttprepo's override of local() works
819 820 if not self.local():
820 821 return False
821 822 if not self.publishing():
822 823 return True
823 824 # if publishing we can't copy if there is filtered content
824 825 return not self.filtered('visible').changelog.filteredrevs
825 826
826 827 def shared(self):
827 828 '''the type of shared repository (None if not shared)'''
828 829 if self.sharedpath != self.path:
829 830 return 'store'
830 831 return None
831 832
832 833 def join(self, f, *insidef):
833 834 return self.vfs.join(os.path.join(f, *insidef))
834 835
835 836 def wjoin(self, f, *insidef):
836 837 return self.vfs.reljoin(self.root, f, *insidef)
837 838
838 839 def file(self, f):
839 840 if f[0] == '/':
840 841 f = f[1:]
841 842 return filelog.filelog(self.svfs, f)
842 843
843 844 def changectx(self, changeid):
844 845 return self[changeid]
845 846
846 847 def parents(self, changeid=None):
847 848 '''get list of changectxs for parents of changeid'''
848 849 return self[changeid].parents()
849 850
850 851 def setparents(self, p1, p2=nullid):
851 852 self.dirstate.beginparentchange()
852 853 copies = self.dirstate.setparents(p1, p2)
853 854 pctx = self[p1]
854 855 if copies:
855 856 # Adjust copy records, the dirstate cannot do it, it
856 857 # requires access to parents manifests. Preserve them
857 858 # only for entries added to first parent.
858 859 for f in copies:
859 860 if f not in pctx and copies[f] in pctx:
860 861 self.dirstate.copy(copies[f], f)
861 862 if p2 == nullid:
862 863 for f, s in sorted(self.dirstate.copies().items()):
863 864 if f not in pctx and s not in pctx:
864 865 self.dirstate.copy(None, f)
865 866 self.dirstate.endparentchange()
866 867
867 868 def filectx(self, path, changeid=None, fileid=None):
868 869 """changeid can be a changeset revision, node, or tag.
869 870 fileid can be a file revision or node."""
870 871 return context.filectx(self, path, changeid, fileid)
871 872
872 873 def getcwd(self):
873 874 return self.dirstate.getcwd()
874 875
875 876 def pathto(self, f, cwd=None):
876 877 return self.dirstate.pathto(f, cwd)
877 878
878 879 def wfile(self, f, mode='r'):
879 880 return self.wvfs(f, mode)
880 881
881 882 def _link(self, f):
882 883 return self.wvfs.islink(f)
883 884
884 885 def _loadfilter(self, filter):
885 886 if filter not in self.filterpats:
886 887 l = []
887 888 for pat, cmd in self.ui.configitems(filter):
888 889 if cmd == '!':
889 890 continue
890 891 mf = matchmod.match(self.root, '', [pat])
891 892 fn = None
892 893 params = cmd
893 894 for name, filterfn in self._datafilters.iteritems():
894 895 if cmd.startswith(name):
895 896 fn = filterfn
896 897 params = cmd[len(name):].lstrip()
897 898 break
898 899 if not fn:
899 900 fn = lambda s, c, **kwargs: util.filter(s, c)
900 901 # Wrap old filters not supporting keyword arguments
901 902 if not inspect.getargspec(fn)[2]:
902 903 oldfn = fn
903 904 fn = lambda s, c, **kwargs: oldfn(s, c)
904 905 l.append((mf, fn, params))
905 906 self.filterpats[filter] = l
906 907 return self.filterpats[filter]
907 908
908 909 def _filter(self, filterpats, filename, data):
909 910 for mf, fn, cmd in filterpats:
910 911 if mf(filename):
911 912 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
912 913 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
913 914 break
914 915
915 916 return data
916 917
917 918 @unfilteredpropertycache
918 919 def _encodefilterpats(self):
919 920 return self._loadfilter('encode')
920 921
921 922 @unfilteredpropertycache
922 923 def _decodefilterpats(self):
923 924 return self._loadfilter('decode')
924 925
925 926 def adddatafilter(self, name, filter):
926 927 self._datafilters[name] = filter
927 928
928 929 def wread(self, filename):
929 930 if self._link(filename):
930 931 data = self.wvfs.readlink(filename)
931 932 else:
932 933 data = self.wvfs.read(filename)
933 934 return self._filter(self._encodefilterpats, filename, data)
934 935
935 936 def wwrite(self, filename, data, flags):
936 937 """write ``data`` into ``filename`` in the working directory
937 938
938 939 This returns the length of the written (possibly decoded) data.
939 940 """
940 941 data = self._filter(self._decodefilterpats, filename, data)
941 942 if 'l' in flags:
942 943 self.wvfs.symlink(data, filename)
943 944 else:
944 945 self.wvfs.write(filename, data)
945 946 if 'x' in flags:
946 947 self.wvfs.setflags(filename, False, True)
947 948 return len(data)
948 949
949 950 def wwritedata(self, filename, data):
950 951 return self._filter(self._decodefilterpats, filename, data)
951 952
952 953 def currenttransaction(self):
953 954 """return the current transaction or None if non exists"""
954 955 if self._transref:
955 956 tr = self._transref()
956 957 else:
957 958 tr = None
958 959
959 960 if tr and tr.running():
960 961 return tr
961 962 return None
962 963
963 964 def transaction(self, desc, report=None):
964 965 if (self.ui.configbool('devel', 'all-warnings')
965 966 or self.ui.configbool('devel', 'check-locks')):
966 967 l = self._lockref and self._lockref()
967 968 if l is None or not l.held:
968 969 self.ui.develwarn('transaction with no lock')
969 970 tr = self.currenttransaction()
970 971 if tr is not None:
971 972 return tr.nest()
972 973
973 974 # abort here if the journal already exists
974 975 if self.svfs.exists("journal"):
975 976 raise error.RepoError(
976 977 _("abandoned transaction found"),
977 978 hint=_("run 'hg recover' to clean up transaction"))
978 979
979 980 # make journal.dirstate contain in-memory changes at this point
980 981 self.dirstate.write()
981 982
982 983 idbase = "%.40f#%f" % (random.random(), time.time())
983 984 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
984 985 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
985 986
986 987 self._writejournal(desc)
987 988 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
988 989 if report:
989 990 rp = report
990 991 else:
991 992 rp = self.ui.warn
992 993 vfsmap = {'plain': self.vfs} # root of .hg/
993 994 # we must avoid cyclic reference between repo and transaction.
994 995 reporef = weakref.ref(self)
995 996 def validate(tr):
996 997 """will run pre-closing hooks"""
997 998 pending = lambda: tr.writepending() and self.root or ""
998 999 reporef().hook('pretxnclose', throw=True, pending=pending,
999 1000 txnname=desc, **tr.hookargs)
1000 1001
1001 1002 tr = transaction.transaction(rp, self.svfs, vfsmap,
1002 1003 "journal",
1003 1004 "undo",
1004 1005 aftertrans(renames),
1005 1006 self.store.createmode,
1006 1007 validator=validate)
1007 1008
1008 1009 tr.hookargs['txnid'] = txnid
1009 1010 # note: writing the fncache only during finalize means that the file is
1010 1011 # outdated when running hooks. As fncache is used for streaming clone,
1011 1012 # this is not expected to break anything that happens during the hooks.
1012 1013 tr.addfinalize('flush-fncache', self.store.write)
1013 1014 def txnclosehook(tr2):
1014 1015 """To be run if transaction is successful, will schedule a hook run
1015 1016 """
1016 1017 def hook():
1017 1018 reporef().hook('txnclose', throw=False, txnname=desc,
1018 1019 **tr2.hookargs)
1019 1020 reporef()._afterlock(hook)
1020 1021 tr.addfinalize('txnclose-hook', txnclosehook)
1021 1022 def txnaborthook(tr2):
1022 1023 """To be run if transaction is aborted
1023 1024 """
1024 1025 reporef().hook('txnabort', throw=False, txnname=desc,
1025 1026 **tr2.hookargs)
1026 1027 tr.addabort('txnabort-hook', txnaborthook)
1027 1028 # avoid eager cache invalidation. in-memory data should be identical
1028 1029 # to stored data if transaction has no error.
1029 1030 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1030 1031 self._transref = weakref.ref(tr)
1031 1032 return tr
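The idiomatic calling pattern for this API (a sketch; 'my-operation' is a hypothetical description, and the caller is expected to hold the store lock, per the devel warning above):

lock = repo.lock()
try:
    tr = repo.transaction('my-operation')
    try:
        # ... write store data through tr ...
        tr.close()    # runs pretxnclose validation, then finalizers
    finally:
        tr.release()  # rolls the journal back if close() never ran
finally:
    lock.release()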
1032 1033
1033 1034 def _journalfiles(self):
1034 1035 return ((self.svfs, 'journal'),
1035 1036 (self.vfs, 'journal.dirstate'),
1036 1037 (self.vfs, 'journal.branch'),
1037 1038 (self.vfs, 'journal.desc'),
1038 1039 (self.vfs, 'journal.bookmarks'),
1039 1040 (self.svfs, 'journal.phaseroots'))
1040 1041
1041 1042 def undofiles(self):
1042 1043 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1043 1044
1044 1045 def _writejournal(self, desc):
1045 1046 self.vfs.write("journal.dirstate",
1046 1047 self.vfs.tryread("dirstate"))
1047 1048 self.vfs.write("journal.branch",
1048 1049 encoding.fromlocal(self.dirstate.branch()))
1049 1050 self.vfs.write("journal.desc",
1050 1051 "%d\n%s\n" % (len(self), desc))
1051 1052 self.vfs.write("journal.bookmarks",
1052 1053 self.vfs.tryread("bookmarks"))
1053 1054 self.svfs.write("journal.phaseroots",
1054 1055 self.svfs.tryread("phaseroots"))
1055 1056
1056 1057 def recover(self):
1057 1058 lock = self.lock()
1058 1059 try:
1059 1060 if self.svfs.exists("journal"):
1060 1061 self.ui.status(_("rolling back interrupted transaction\n"))
1061 1062 vfsmap = {'': self.svfs,
1062 1063 'plain': self.vfs,}
1063 1064 transaction.rollback(self.svfs, vfsmap, "journal",
1064 1065 self.ui.warn)
1065 1066 self.invalidate()
1066 1067 return True
1067 1068 else:
1068 1069 self.ui.warn(_("no interrupted transaction available\n"))
1069 1070 return False
1070 1071 finally:
1071 1072 lock.release()
1072 1073
1073 1074 def rollback(self, dryrun=False, force=False):
1074 1075 wlock = lock = None
1075 1076 try:
1076 1077 wlock = self.wlock()
1077 1078 lock = self.lock()
1078 1079 if self.svfs.exists("undo"):
1079 1080 return self._rollback(dryrun, force)
1080 1081 else:
1081 1082 self.ui.warn(_("no rollback information available\n"))
1082 1083 return 1
1083 1084 finally:
1084 1085 release(lock, wlock)
1085 1086
1086 1087 @unfilteredmethod # Until we get smarter cache management
1087 1088 def _rollback(self, dryrun, force):
1088 1089 ui = self.ui
1089 1090 try:
1090 1091 args = self.vfs.read('undo.desc').splitlines()
1091 1092 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1092 1093 if len(args) >= 3:
1093 1094 detail = args[2]
1094 1095 oldtip = oldlen - 1
1095 1096
1096 1097 if detail and ui.verbose:
1097 1098 msg = (_('repository tip rolled back to revision %s'
1098 1099 ' (undo %s: %s)\n')
1099 1100 % (oldtip, desc, detail))
1100 1101 else:
1101 1102 msg = (_('repository tip rolled back to revision %s'
1102 1103 ' (undo %s)\n')
1103 1104 % (oldtip, desc))
1104 1105 except IOError:
1105 1106 msg = _('rolling back unknown transaction\n')
1106 1107 desc = None
1107 1108
1108 1109 if not force and self['.'] != self['tip'] and desc == 'commit':
1109 1110 raise util.Abort(
1110 1111 _('rollback of last commit while not checked out '
1111 1112 'may lose data'), hint=_('use -f to force'))
1112 1113
1113 1114 ui.status(msg)
1114 1115 if dryrun:
1115 1116 return 0
1116 1117
1117 1118 parents = self.dirstate.parents()
1118 1119 self.destroying()
1119 1120 vfsmap = {'plain': self.vfs, '': self.svfs}
1120 1121 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1121 1122 if self.vfs.exists('undo.bookmarks'):
1122 1123 self.vfs.rename('undo.bookmarks', 'bookmarks')
1123 1124 if self.svfs.exists('undo.phaseroots'):
1124 1125 self.svfs.rename('undo.phaseroots', 'phaseroots')
1125 1126 self.invalidate()
1126 1127
1127 1128 parentgone = (parents[0] not in self.changelog.nodemap or
1128 1129 parents[1] not in self.changelog.nodemap)
1129 1130 if parentgone:
1130 1131 self.vfs.rename('undo.dirstate', 'dirstate')
1131 1132 try:
1132 1133 branch = self.vfs.read('undo.branch')
1133 1134 self.dirstate.setbranch(encoding.tolocal(branch))
1134 1135 except IOError:
1135 1136 ui.warn(_('named branch could not be reset: '
1136 1137 'current branch is still \'%s\'\n')
1137 1138 % self.dirstate.branch())
1138 1139
1139 1140 self.dirstate.invalidate()
1140 1141 parents = tuple([p.rev() for p in self.parents()])
1141 1142 if len(parents) > 1:
1142 1143 ui.status(_('working directory now based on '
1143 1144 'revisions %d and %d\n') % parents)
1144 1145 else:
1145 1146 ui.status(_('working directory now based on '
1146 1147 'revision %d\n') % parents)
1147 1148 ms = mergemod.mergestate(self)
1148 1149 ms.reset(self['.'].node())
1149 1150
1150 1151 # TODO: if we know which new heads may result from this rollback, pass
1151 1152 # them to destroy(), which will prevent the branchhead cache from being
1152 1153 # invalidated.
1153 1154 self.destroyed()
1154 1155 return 0
1155 1156
1156 1157 def invalidatecaches(self):
1157 1158
1158 1159 if '_tagscache' in vars(self):
1159 1160 # can't use delattr on proxy
1160 1161 del self.__dict__['_tagscache']
1161 1162
1162 1163 self.unfiltered()._branchcaches.clear()
1163 1164 self.invalidatevolatilesets()
1164 1165
1165 1166 def invalidatevolatilesets(self):
1166 1167 self.filteredrevcache.clear()
1167 1168 obsolete.clearobscaches(self)
1168 1169
1169 1170 def invalidatedirstate(self):
1170 1171 '''Invalidates the dirstate, causing the next call to dirstate
1171 1172 to check if it was modified since the last time it was read,
1172 1173 rereading it if it has.
1173 1174
1174 1175 This is different from dirstate.invalidate() in that it doesn't always
1175 1176 reread the dirstate. Use dirstate.invalidate() if you want to
1176 1177 explicitly read the dirstate again (i.e. restoring it to a previous
1177 1178 known good state).'''
1178 1179 if hasunfilteredcache(self, 'dirstate'):
1179 1180 for k in self.dirstate._filecache:
1180 1181 try:
1181 1182 delattr(self.dirstate, k)
1182 1183 except AttributeError:
1183 1184 pass
1184 1185 delattr(self.unfiltered(), 'dirstate')
1185 1186
1186 1187 def invalidate(self):
1187 1188 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1188 1189 for k in self._filecache:
1189 1190 # dirstate is invalidated separately in invalidatedirstate()
1190 1191 if k == 'dirstate':
1191 1192 continue
1192 1193
1193 1194 try:
1194 1195 delattr(unfiltered, k)
1195 1196 except AttributeError:
1196 1197 pass
1197 1198 self.invalidatecaches()
1198 1199 self.store.invalidatecaches()
1199 1200
1200 1201 def invalidateall(self):
1201 1202 '''Fully invalidates both store and non-store parts, causing the
1202 1203 subsequent operation to reread any outside changes.'''
1203 1204 # extension should hook this to invalidate its caches
1204 1205 self.invalidate()
1205 1206 self.invalidatedirstate()
1206 1207
1207 1208 def _refreshfilecachestats(self, tr):
1208 1209 """Reload stats of cached files so that they are flagged as valid"""
1209 1210 for k, ce in self._filecache.items():
1210 1211 if k == 'dirstate' or k not in self.__dict__:
1211 1212 continue
1212 1213 ce.refresh()
1213 1214
1214 1215 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1215 1216 parentenvvar=None):
1216 1217 parentlock = None
1217 1218 if parentenvvar is not None:
1218 1219 parentlock = os.environ.get(parentenvvar)
1219 1220 try:
1220 1221 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1221 1222 acquirefn=acquirefn, desc=desc,
1222 1223 parentlock=parentlock)
1223 1224 except error.LockHeld as inst:
1224 1225 if not wait:
1225 1226 raise
1226 1227 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1227 1228 (desc, inst.locker))
1228 1229 # default to 600 seconds timeout
1229 1230 l = lockmod.lock(vfs, lockname,
1230 1231 int(self.ui.config("ui", "timeout", "600")),
1231 1232 releasefn=releasefn, acquirefn=acquirefn,
1232 1233 desc=desc)
1233 1234 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1234 1235 return l
1235 1236
1236 1237 def _afterlock(self, callback):
1237 1238 """add a callback to be run when the repository is fully unlocked
1238 1239
1239 1240 The callback will be executed when the outermost lock is released
1240 1241 (with wlock being higher level than 'lock')."""
1241 1242 for ref in (self._wlockref, self._lockref):
1242 1243 l = ref and ref()
1243 1244 if l and l.held:
1244 1245 l.postrelease.append(callback)
1245 1246 break
1246 1247 else: # no lock has been found.
1247 1248 callback()
1248 1249
1249 1250 def lock(self, wait=True):
1250 1251 '''Lock the repository store (.hg/store) and return a weak reference
1251 1252 to the lock. Use this before modifying the store (e.g. committing or
1252 1253 stripping). If you are opening a transaction, get a lock as well.
1253 1254
1254 1255 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1255 1256 'wlock' first to avoid a deadlock hazard.'''
1256 1257 l = self._lockref and self._lockref()
1257 1258 if l is not None and l.held:
1258 1259 l.lock()
1259 1260 return l
1260 1261
1261 1262 l = self._lock(self.svfs, "lock", wait, None,
1262 1263 self.invalidate, _('repository %s') % self.origroot)
1263 1264 self._lockref = weakref.ref(l)
1264 1265 return l
1265 1266
1266 1267 def wlock(self, wait=True):
1267 1268 '''Lock the non-store parts of the repository (everything under
1268 1269 .hg except .hg/store) and return a weak reference to the lock.
1269 1270
1270 1271 Use this before modifying files in .hg.
1271 1272
1272 1273 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1273 1274 'wlock' first to avoid a deadlock hazard.'''
1274 1275 l = self._wlockref and self._wlockref()
1275 1276 if l is not None and l.held:
1276 1277 l.lock()
1277 1278 return l
1278 1279
1279 1280 # We do not need to check for non-waiting lock acquisition. Such
1280 1281 # acquisition would not cause a deadlock, as it would just fail.
1281 1282 if wait and (self.ui.configbool('devel', 'all-warnings')
1282 1283 or self.ui.configbool('devel', 'check-locks')):
1283 1284 l = self._lockref and self._lockref()
1284 1285 if l is not None and l.held:
1285 1286 self.ui.develwarn('"wlock" acquired after "lock"')
1286 1287
1287 1288 def unlock():
1288 1289 if self.dirstate.pendingparentchange():
1289 1290 self.dirstate.invalidate()
1290 1291 else:
1291 1292 self.dirstate.write()
1292 1293
1293 1294 self._filecache['dirstate'].refresh()
1294 1295
1295 1296 l = self._lock(self.vfs, "wlock", wait, unlock,
1296 1297 self.invalidatedirstate, _('working directory of %s') %
1297 1298 self.origroot)
1298 1299 self._wlockref = weakref.ref(l)
1299 1300 return l
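When both locks are needed, they are taken in wlock-then-lock order, mirroring rollback() earlier in this file (sketch; release is mercurial.lock.release):

from mercurial.lock import release

wlock = lock = None
try:
    wlock = repo.wlock()   # non-store parts of .hg first
    lock = repo.lock()     # then the store
    # ... modify working directory and store files ...
finally:
    release(lock, wlock)   # releases in the order given, skipping None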
1300 1301
1301 1302 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1302 1303 """
1303 1304 commit an individual file as part of a larger transaction
1304 1305 """
1305 1306
1306 1307 fname = fctx.path()
1307 1308 fparent1 = manifest1.get(fname, nullid)
1308 1309 fparent2 = manifest2.get(fname, nullid)
1309 1310 if isinstance(fctx, context.filectx):
1310 1311 node = fctx.filenode()
1311 1312 if node in [fparent1, fparent2]:
1312 1313 self.ui.debug('reusing %s filelog entry\n' % fname)
1313 1314 return node
1314 1315
1315 1316 flog = self.file(fname)
1316 1317 meta = {}
1317 1318 copy = fctx.renamed()
1318 1319 if copy and copy[0] != fname:
1319 1320 # Mark the new revision of this file as a copy of another
1320 1321 # file. This copy data will effectively act as a parent
1321 1322 # of this new revision. If this is a merge, the first
1322 1323 # parent will be the nullid (meaning "look up the copy data")
1323 1324 # and the second one will be the other parent. For example:
1324 1325 #
1325 1326 # 0 --- 1 --- 3 rev1 changes file foo
1326 1327 # \ / rev2 renames foo to bar and changes it
1327 1328 # \- 2 -/ rev3 should have bar with all changes and
1328 1329 # should record that bar descends from
1329 1330 # bar in rev2 and foo in rev1
1330 1331 #
1331 1332 # this allows this merge to succeed:
1332 1333 #
1333 1334 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1334 1335 # \ / merging rev3 and rev4 should use bar@rev2
1335 1336 # \- 2 --- 4 as the merge base
1336 1337 #
1337 1338
1338 1339 cfname = copy[0]
1339 1340 crev = manifest1.get(cfname)
1340 1341 newfparent = fparent2
1341 1342
1342 1343 if manifest2: # branch merge
1343 1344 if fparent2 == nullid or crev is None: # copied on remote side
1344 1345 if cfname in manifest2:
1345 1346 crev = manifest2[cfname]
1346 1347 newfparent = fparent1
1347 1348
1348 1349 # Here, we used to search backwards through history to try to find
1349 1350 # where the file copy came from if the source of a copy was not in
1350 1351 # the parent directory. However, this doesn't actually make sense to
1351 1352 # do (what does a copy from something not in your working copy even
1352 1353 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1353 1354 # the user that copy information was dropped, so if they didn't
1354 1355 # expect this outcome it can be fixed, but this is the correct
1355 1356 # behavior in this circumstance.
1356 1357
1357 1358 if crev:
1358 1359 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1359 1360 meta["copy"] = cfname
1360 1361 meta["copyrev"] = hex(crev)
1361 1362 fparent1, fparent2 = nullid, newfparent
1362 1363 else:
1363 1364 self.ui.warn(_("warning: can't find ancestor for '%s' "
1364 1365 "copied from '%s'!\n") % (fname, cfname))
1365 1366
1366 1367 elif fparent1 == nullid:
1367 1368 fparent1, fparent2 = fparent2, nullid
1368 1369 elif fparent2 != nullid:
1369 1370 # is one parent an ancestor of the other?
1370 1371 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1371 1372 if fparent1 in fparentancestors:
1372 1373 fparent1, fparent2 = fparent2, nullid
1373 1374 elif fparent2 in fparentancestors:
1374 1375 fparent2 = nullid
1375 1376
1376 1377 # is the file changed?
1377 1378 text = fctx.data()
1378 1379 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1379 1380 changelist.append(fname)
1380 1381 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1381 1382 # are just the flags changed during merge?
1382 1383 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1383 1384 changelist.append(fname)
1384 1385
1385 1386 return fparent1
1386 1387
1387 1388 @unfilteredmethod
1388 1389 def commit(self, text="", user=None, date=None, match=None, force=False,
1389 1390 editor=False, extra=None):
1390 1391 """Add a new revision to current repository.
1391 1392
1392 1393 Revision information is gathered from the working directory,
1393 1394 match can be used to filter the committed files. If editor is
1394 1395 supplied, it is called to get a commit message.
1395 1396 """
1396 1397 if extra is None:
1397 1398 extra = {}
1398 1399
1399 1400 def fail(f, msg):
1400 1401 raise util.Abort('%s: %s' % (f, msg))
1401 1402
1402 1403 if not match:
1403 1404 match = matchmod.always(self.root, '')
1404 1405
1405 1406 if not force:
1406 1407 vdirs = []
1407 1408 match.explicitdir = vdirs.append
1408 1409 match.bad = fail
1409 1410
1410 1411 wlock = self.wlock()
1411 1412 try:
1412 1413 wctx = self[None]
1413 1414 merge = len(wctx.parents()) > 1
1414 1415
1415 1416 if not force and merge and match.ispartial():
1416 1417 raise util.Abort(_('cannot partially commit a merge '
1417 1418 '(do not specify files or patterns)'))
1418 1419
1419 1420 status = self.status(match=match, clean=force)
1420 1421 if force:
1421 1422 status.modified.extend(status.clean) # mq may commit clean files
1422 1423
1423 1424 # check subrepos
1424 1425 subs = []
1425 1426 commitsubs = set()
1426 1427 newstate = wctx.substate.copy()
1427 1428 # only manage subrepos and .hgsubstate if .hgsub is present
1428 1429 if '.hgsub' in wctx:
1429 1430 # we'll decide whether to track this ourselves, thanks
1430 1431 for c in status.modified, status.added, status.removed:
1431 1432 if '.hgsubstate' in c:
1432 1433 c.remove('.hgsubstate')
1433 1434
1434 1435 # compare current state to last committed state
1435 1436 # build new substate based on last committed state
1436 1437 oldstate = wctx.p1().substate
1437 1438 for s in sorted(newstate.keys()):
1438 1439 if not match(s):
1439 1440 # ignore working copy, use old state if present
1440 1441 if s in oldstate:
1441 1442 newstate[s] = oldstate[s]
1442 1443 continue
1443 1444 if not force:
1444 1445 raise util.Abort(
1445 1446 _("commit with new subrepo %s excluded") % s)
1446 1447 dirtyreason = wctx.sub(s).dirtyreason(True)
1447 1448 if dirtyreason:
1448 1449 if not self.ui.configbool('ui', 'commitsubrepos'):
1449 1450 raise util.Abort(dirtyreason,
1450 1451 hint=_("use --subrepos for recursive commit"))
1451 1452 subs.append(s)
1452 1453 commitsubs.add(s)
1453 1454 else:
1454 1455 bs = wctx.sub(s).basestate()
1455 1456 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1456 1457 if oldstate.get(s, (None, None, None))[1] != bs:
1457 1458 subs.append(s)
1458 1459
1459 1460 # check for removed subrepos
1460 1461 for p in wctx.parents():
1461 1462 r = [s for s in p.substate if s not in newstate]
1462 1463 subs += [s for s in r if match(s)]
1463 1464 if subs:
1464 1465 if (not match('.hgsub') and
1465 1466 '.hgsub' in (wctx.modified() + wctx.added())):
1466 1467 raise util.Abort(
1467 1468 _("can't commit subrepos without .hgsub"))
1468 1469 status.modified.insert(0, '.hgsubstate')
1469 1470
1470 1471 elif '.hgsub' in status.removed:
1471 1472 # clean up .hgsubstate when .hgsub is removed
1472 1473 if ('.hgsubstate' in wctx and
1473 1474 '.hgsubstate' not in (status.modified + status.added +
1474 1475 status.removed)):
1475 1476 status.removed.insert(0, '.hgsubstate')
1476 1477
1477 1478 # make sure all explicit patterns are matched
1478 1479 if not force and (match.isexact() or match.prefix()):
1479 1480 matched = set(status.modified + status.added + status.removed)
1480 1481
1481 1482 for f in match.files():
1482 1483 f = self.dirstate.normalize(f)
1483 1484 if f == '.' or f in matched or f in wctx.substate:
1484 1485 continue
1485 1486 if f in status.deleted:
1486 1487 fail(f, _('file not found!'))
1487 1488 if f in vdirs: # visited directory
1488 1489 d = f + '/'
1489 1490 for mf in matched:
1490 1491 if mf.startswith(d):
1491 1492 break
1492 1493 else:
1493 1494 fail(f, _("no match under directory!"))
1494 1495 elif f not in self.dirstate:
1495 1496 fail(f, _("file not tracked!"))
1496 1497
1497 1498 cctx = context.workingcommitctx(self, status,
1498 1499 text, user, date, extra)
1499 1500
1500 1501 # internal config: ui.allowemptycommit
1501 1502 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1502 1503 or extra.get('close') or merge or cctx.files()
1503 1504 or self.ui.configbool('ui', 'allowemptycommit'))
1504 1505 if not allowemptycommit:
1505 1506 return None
1506 1507
1507 1508 if merge and cctx.deleted():
1508 1509 raise util.Abort(_("cannot commit merge with missing files"))
1509 1510
1510 1511 ms = mergemod.mergestate(self)
1511 1512 for f in status.modified:
1512 1513 if f in ms and ms[f] == 'u':
1513 1514 raise util.Abort(_('unresolved merge conflicts '
1514 1515 '(see "hg help resolve")'))
1515 1516
1516 1517 if editor:
1517 1518 cctx._text = editor(self, cctx, subs)
1518 1519 edited = (text != cctx._text)
1519 1520
1520 1521 # Save commit message in case this transaction gets rolled back
1521 1522 # (e.g. by a pretxncommit hook). Leave the content alone on
1522 1523 # the assumption that the user will use the same editor again.
1523 1524 msgfn = self.savecommitmessage(cctx._text)
1524 1525
1525 1526 # commit subs and write new state
1526 1527 if subs:
1527 1528 for s in sorted(commitsubs):
1528 1529 sub = wctx.sub(s)
1529 1530 self.ui.status(_('committing subrepository %s\n') %
1530 1531 subrepo.subrelpath(sub))
1531 1532 sr = sub.commit(cctx._text, user, date)
1532 1533 newstate[s] = (newstate[s][0], sr)
1533 1534 subrepo.writestate(self, newstate)
1534 1535
1535 1536 p1, p2 = self.dirstate.parents()
1536 1537 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1537 1538 try:
1538 1539 self.hook("precommit", throw=True, parent1=hookp1,
1539 1540 parent2=hookp2)
1540 1541 ret = self.commitctx(cctx, True)
1541 1542 except: # re-raises
1542 1543 if edited:
1543 1544 self.ui.write(
1544 1545 _('note: commit message saved in %s\n') % msgfn)
1545 1546 raise
1546 1547
1547 1548 # update bookmarks, dirstate and mergestate
1548 1549 bookmarks.update(self, [p1, p2], ret)
1549 1550 cctx.markcommitted(ret)
1550 1551 ms.reset()
1551 1552 finally:
1552 1553 wlock.release()
1553 1554
1554 1555 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1555 1556 # hack for commands that use a temporary commit (e.g. histedit):
1556 1557 # the temporary commit may already be stripped when the hook runs
1557 1558 if self.changelog.hasnode(ret):
1558 1559 self.hook("commit", node=node, parent1=parent1,
1559 1560 parent2=parent2)
1560 1561 self._afterlock(commithook)
1561 1562 return ret
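
For illustration, a minimal sketch of driving this API in-process. The helper name, commit message, and 'path:doc' pattern are hypothetical examples, not part of this change; matchmod.match() is the matcher constructor used throughout this module.

    # Hypothetical helper: commit only the files under doc/, leaving
    # any other working-directory changes uncommitted.
    from mercurial import match as matchmod

    def commitdocs(repo):
        m = matchmod.match(repo.root, '', ['path:doc'])
        node = repo.commit(text='doc: update manuals', match=m)
        if node is None:
            # commit() returns None when there is nothing to commit
            repo.ui.status('nothing changed\n')
        return node
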
1562 1563
1563 1564 @unfilteredmethod
1564 1565 def commitctx(self, ctx, error=False):
1565 1566 """Add a new revision to current repository.
1566 1567 Revision information is passed via the context argument.
1567 1568 """
1568 1569
1569 1570 tr = None
1570 1571 p1, p2 = ctx.p1(), ctx.p2()
1571 1572 user = ctx.user()
1572 1573
1573 1574 lock = self.lock()
1574 1575 try:
1575 1576 tr = self.transaction("commit")
1576 1577 trp = weakref.proxy(tr)
1577 1578
1578 1579 if ctx.files():
1579 1580 m1 = p1.manifest()
1580 1581 m2 = p2.manifest()
1581 1582 m = m1.copy()
1582 1583
1583 1584 # check in files
1584 1585 added = []
1585 1586 changed = []
1586 1587 removed = list(ctx.removed())
1587 1588 linkrev = len(self)
1588 1589 self.ui.note(_("committing files:\n"))
1589 1590 for f in sorted(ctx.modified() + ctx.added()):
1590 1591 self.ui.note(f + "\n")
1591 1592 try:
1592 1593 fctx = ctx[f]
1593 1594 if fctx is None:
1594 1595 removed.append(f)
1595 1596 else:
1596 1597 added.append(f)
1597 1598 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1598 1599 trp, changed)
1599 1600 m.setflag(f, fctx.flags())
1600 1601 except OSError as inst:
1601 1602 self.ui.warn(_("trouble committing %s!\n") % f)
1602 1603 raise
1603 1604 except IOError as inst:
1604 1605 errcode = getattr(inst, 'errno', errno.ENOENT)
1605 1606 if error or errcode and errcode != errno.ENOENT:
1606 1607 self.ui.warn(_("trouble committing %s!\n") % f)
1607 1608 raise
1608 1609
1609 1610 # update manifest
1610 1611 self.ui.note(_("committing manifest\n"))
1611 1612 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1612 1613 drop = [f for f in removed if f in m]
1613 1614 for f in drop:
1614 1615 del m[f]
1615 1616 mn = self.manifest.add(m, trp, linkrev,
1616 1617 p1.manifestnode(), p2.manifestnode(),
1617 1618 added, drop)
1618 1619 files = changed + removed
1619 1620 else:
1620 1621 mn = p1.manifestnode()
1621 1622 files = []
1622 1623
1623 1624 # update changelog
1624 1625 self.ui.note(_("committing changelog\n"))
1625 1626 self.changelog.delayupdate(tr)
1626 1627 n = self.changelog.add(mn, files, ctx.description(),
1627 1628 trp, p1.node(), p2.node(),
1628 1629 user, ctx.date(), ctx.extra().copy())
1629 1630 p = lambda: tr.writepending() and self.root or ""
1630 1631 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1631 1632 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1632 1633 parent2=xp2, pending=p)
1633 1634 # set the new commit in its proper phase
1634 1635 targetphase = subrepo.newcommitphase(self.ui, ctx)
1635 1636 if targetphase:
1636 1637 # retracting the boundary does not alter parent changesets:
1637 1638 # if a parent has a higher phase, the resulting phase will
1638 1639 # be compliant anyway
1639 1640 #
1640 1641 # if minimal phase was 0 we don't need to retract anything
1641 1642 phases.retractboundary(self, tr, targetphase, [n])
1642 1643 tr.close()
1643 1644 branchmap.updatecache(self.filtered('served'))
1644 1645 return n
1645 1646 finally:
1646 1647 if tr:
1647 1648 tr.release()
1648 1649 lock.release()
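
As a sketch of how commitctx() can be driven directly, bypassing the working directory: an in-memory context built with context.memctx/memfilectx. The helper and its single-file setup are illustrative assumptions, not part of this change.

    # Hypothetical helper: commit one file's content without touching
    # the working directory, by handing commitctx() a memory context.
    from mercurial import context
    from mercurial.node import nullid

    def commitblob(repo, path, data, text):
        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, f, data)
        p1 = repo['.']
        mctx = context.memctx(repo, (p1.node(), nullid), text,
                              [path], getfilectx)
        return repo.commitctx(mctx)
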
1649 1650
1650 1651 @unfilteredmethod
1651 1652 def destroying(self):
1652 1653 '''Inform the repository that nodes are about to be destroyed.
1653 1654 Intended for use by strip and rollback, so there's a common
1654 1655 place for anything that has to be done before destroying history.
1655 1656
1656 1657 This is mostly useful for saving state that is in memory and waiting
1657 1658 to be flushed when the current lock is released. Because a call to
1658 1659 destroyed is imminent, the repo will be invalidated, causing those
1659 1660 changes to stay in memory (waiting for the next unlock), or vanish
1660 1661 completely.
1661 1662 '''
1662 1663 # When using the same lock to commit and strip, the phasecache is left
1663 1664 # dirty after committing. Then when we strip, the repo is invalidated,
1664 1665 # causing those changes to disappear.
1665 1666 if '_phasecache' in vars(self):
1666 1667 self._phasecache.write()
1667 1668
1668 1669 @unfilteredmethod
1669 1670 def destroyed(self):
1670 1671 '''Inform the repository that nodes have been destroyed.
1671 1672 Intended for use by strip and rollback, so there's a common
1672 1673 place for anything that has to be done after destroying history.
1673 1674 '''
1674 1675 # When one tries to:
1675 1676 # 1) destroy nodes thus calling this method (e.g. strip)
1676 1677 # 2) use phasecache somewhere (e.g. commit)
1677 1678 #
1678 1679 # then 2) will fail because the phasecache contains nodes that were
1679 1680 # removed. We can either remove phasecache from the filecache,
1680 1681 # causing it to reload next time it is accessed, or simply filter
1681 1682 # the removed nodes now and write the updated cache.
1682 1683 self._phasecache.filterunknown(self)
1683 1684 self._phasecache.write()
1684 1685
1685 1686 # update the 'served' branch cache to help read-only server processes
1686 1687 # Thanks to branchcache collaboration this is done from the nearest
1687 1688 # filtered subset and it is expected to be fast.
1688 1689 branchmap.updatecache(self.filtered('served'))
1689 1690
1690 1691 # Ensure the persistent tag cache is updated. Doing it now
1691 1692 # means that the tag cache only has to worry about destroyed
1692 1693 # heads immediately after a strip/rollback. That in turn
1693 1694 # guarantees that "cachetip == currenttip" (comparing both rev
1694 1695 # and node) always means no nodes have been added or destroyed.
1695 1696
1696 1697 # XXX this is suboptimal when qrefresh'ing: we strip the current
1697 1698 # head, refresh the tag cache, then immediately add a new head.
1698 1699 # But I think doing it this way is necessary for the "instant
1699 1700 # tag cache retrieval" case to work.
1700 1701 self.invalidate()
1701 1702
1702 1703 def walk(self, match, node=None):
1703 1704 '''
1704 1705 walk recursively through the directory tree or a given
1705 1706 changeset, finding all files matched by the match
1706 1707 function
1707 1708 '''
1708 1709 return self[node].walk(match)
1709 1710
1710 1711 def status(self, node1='.', node2=None, match=None,
1711 1712 ignored=False, clean=False, unknown=False,
1712 1713 listsubrepos=False):
1713 1714 '''a convenience method that calls node1.status(node2)'''
1714 1715 return self[node1].status(node2, match, ignored, clean, unknown,
1715 1716 listsubrepos)
1716 1717
1717 1718 def heads(self, start=None):
1718 1719 heads = self.changelog.heads(start)
1719 1720 # sort the output in rev descending order
1720 1721 return sorted(heads, key=self.changelog.rev, reverse=True)
1721 1722
1722 1723 def branchheads(self, branch=None, start=None, closed=False):
1723 1724 '''return a (possibly filtered) list of heads for the given branch
1724 1725
1725 1726 Heads are returned in topological order, from newest to oldest.
1726 1727 If branch is None, use the dirstate branch.
1727 1728 If start is not None, return only heads reachable from start.
1728 1729 If closed is True, return heads that are marked as closed as well.
1729 1730 '''
1730 1731 if branch is None:
1731 1732 branch = self[None].branch()
1732 1733 branches = self.branchmap()
1733 1734 if branch not in branches:
1734 1735 return []
1735 1736 # the cache returns heads ordered lowest to highest
1736 1737 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1737 1738 if start is not None:
1738 1739 # filter out the heads that cannot be reached from startrev
1739 1740 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1740 1741 bheads = [h for h in bheads if h in fbheads]
1741 1742 return bheads
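
A usage sketch, assuming a caller that already holds ui and repo objects (the branch name is an example):

    # Print the open heads of the "default" branch, newest first.
    from mercurial.node import short

    for node in repo.branchheads('default'):
        ui.write('%d:%s\n' % (repo[node].rev(), short(node)))
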
1742 1743
1743 1744 def branches(self, nodes):
1744 1745 if not nodes:
1745 1746 nodes = [self.changelog.tip()]
1746 1747 b = []
1747 1748 for n in nodes:
1748 1749 t = n
1749 1750 while True:
1750 1751 p = self.changelog.parents(n)
1751 1752 if p[1] != nullid or p[0] == nullid:
1752 1753 b.append((t, n, p[0], p[1]))
1753 1754 break
1754 1755 n = p[0]
1755 1756 return b
1756 1757
1757 1758 def between(self, pairs):
1758 1759 r = []
1759 1760
1760 1761 for top, bottom in pairs:
1761 1762 n, l, i = top, [], 0
1762 1763 f = 1
1763 1764
1764 1765 while n != bottom and n != nullid:
1765 1766 p = self.changelog.parents(n)[0]
1766 1767 if i == f:
1767 1768 l.append(n)
1768 1769 f = f * 2
1769 1770 n = p
1770 1771 i += 1
1771 1772
1772 1773 r.append(l)
1773 1774
1774 1775 return r
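
The loop above samples nodes at exponentially growing distances from top; a worked comment example, assuming a hypothetical linear history:

    # With revs 0..10 linear, top = rev 10 and bottom = rev 0, the
    # current node is recorded whenever the step counter i reaches
    # f = 1, 2, 4, 8, ..., so between() returns the revs at distance
    # 1, 2, 4 and 8 from top: [9, 8, 6, 2].
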
1775 1776
1776 1777 def checkpush(self, pushop):
1777 1778 """Extensions can override this function if additional checks have
1778 1779 to be performed before pushing, or call it if they override the
1779 1780 push command.
1780 1781 """
1781 1782 pass
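
A sketch of an extension using this hook point via the conventional reposetup()/subclass pattern; the class name and abort message are illustrative.

    from mercurial import util

    def reposetup(ui, repo):
        class restrictedrepo(repo.__class__):
            def checkpush(self, pushop):
                super(restrictedrepo, self).checkpush(pushop)
                # refuse forced pushes to/from this repository
                if pushop.force:
                    raise util.Abort('forced pushes are disabled here')
        repo.__class__ = restrictedrepo
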
1782 1783
1783 1784 @unfilteredpropertycache
1784 1785 def prepushoutgoinghooks(self):
1785 1786 """Return util.hooks consists of "(repo, remote, outgoing)"
1786 1787 functions, which are called before pushing changesets.
1787 1788 """
1788 1789 return util.hooks()
1789 1790
1790 1791 def stream_in(self, remote, remotereqs):
1791 1792 # Save remote branchmap. We will use it later
1792 1793 # to speed up branchcache creation
1793 1794 rbranchmap = None
1794 1795 if remote.capable("branchmap"):
1795 1796 rbranchmap = remote.branchmap()
1796 1797
1797 1798 fp = remote.stream_out()
1798 1799 l = fp.readline()
1799 1800 try:
1800 1801 resp = int(l)
1801 1802 except ValueError:
1802 1803 raise error.ResponseError(
1803 1804 _('unexpected response from remote server:'), l)
1804 1805 if resp == 1:
1805 1806 raise util.Abort(_('operation forbidden by server'))
1806 1807 elif resp == 2:
1807 1808 raise util.Abort(_('locking the remote repository failed'))
1808 1809 elif resp != 0:
1809 1810 raise util.Abort(_('the server sent an unknown error code'))
1810 1811
1811 self.applystreamclone(remotereqs, rbranchmap, fp)
1812 streamclone.applyremotedata(self, remotereqs, rbranchmap, fp)
1812 1813 return len(self.heads()) + 1
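
For reference, a comment sketch of the status codes parsed above, exactly as this method handles them (the raw stream body only follows on success):

    # First line of the "stream_out" response is an integer code:
    #   0     - success; raw stream data follows
    #   1     - operation forbidden by the server
    #   2     - the server failed to lock its repository
    #   other - unknown error code
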
1813 1814
1814 def applystreamclone(self, remotereqs, remotebranchmap, fp):
1815 """Apply stream clone data to this repository.
1816
1817 "remotereqs" is a set of requirements to handle the incoming data.
1818 "remotebranchmap" is the result of a branchmap lookup on the remote. It
1819 can be None.
1820 "fp" is a file object containing the raw stream data, suitable for
1821 feeding into exchange.consumestreamclone.
1822 """
1823 lock = self.lock()
1824 try:
1825 exchange.consumestreamclone(self, fp)
1826
1827 # new requirements = old non-format requirements +
1828 # new format-related remote requirements
1829 # requirements from the streamed-in repository
1830 self.requirements = remotereqs | (
1831 self.requirements - self.supportedformats)
1832 self._applyopenerreqs()
1833 self._writerequirements()
1834
1835 if remotebranchmap:
1836 rbheads = []
1837 closed = []
1838 for bheads in remotebranchmap.itervalues():
1839 rbheads.extend(bheads)
1840 for h in bheads:
1841 r = self.changelog.rev(h)
1842 b, c = self.changelog.branchinfo(r)
1843 if c:
1844 closed.append(h)
1845
1846 if rbheads:
1847 rtiprev = max((int(self.changelog.rev(node))
1848 for node in rbheads))
1849 cache = branchmap.branchcache(remotebranchmap,
1850 self[rtiprev].node(),
1851 rtiprev,
1852 closednodes=closed)
1853 # Try to stick it as low as possible
1854 # filters above served are unlikely to be fetched from a clone
1855 for candidate in ('base', 'immutable', 'served'):
1856 rview = self.filtered(candidate)
1857 if cache.validfor(rview):
1858 self._branchcaches[candidate] = cache
1859 cache.write(rview)
1860 break
1861 self.invalidate()
1862 finally:
1863 lock.release()
1864
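
The requirements merge in the block moved above is plain set arithmetic; a worked example with hypothetical requirement sets:

    # repo.requirements     = {'store', 'revlogv1', 'largefiles'}
    # repo.supportedformats = {'revlogv1', 'generaldelta'}
    # remotereqs            = {'revlogv1', 'generaldelta'}
    #
    # remotereqs | (repo.requirements - repo.supportedformats)
    #   = {'revlogv1', 'generaldelta'} | {'store', 'largefiles'}
    #   = {'revlogv1', 'generaldelta', 'store', 'largefiles'}
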
1865 1815 def clone(self, remote, heads=[], stream=None):
1866 1816 '''clone remote repository.
1867 1817
1868 1818 keyword arguments:
1869 1819 heads: list of revs to clone (forces use of pull)
1870 1820 stream: use streaming clone if possible'''
1871 1821
1872 1822 # now, all clients that can request uncompressed clones can
1873 1823 # read repo formats supported by all servers that can serve
1874 1824 # them.
1875 1825
1876 1826 # if revlog format changes, client will have to check version
1877 1827 # and format flags on "stream" capability, and use
1878 1828 # uncompressed only if compatible.
1879 1829
1880 1830 if stream is None:
1881 1831 # if the server explicitly prefers to stream (for fast LANs)
1882 1832 stream = remote.capable('stream-preferred')
1883 1833
1884 1834 if stream and not heads:
1885 1835 # 'stream' means remote revlog format is revlogv1 only
1886 1836 if remote.capable('stream'):
1887 1837 self.stream_in(remote, set(('revlogv1',)))
1888 1838 else:
1889 1839 # otherwise, 'streamreqs' contains the remote revlog format
1890 1840 streamreqs = remote.capable('streamreqs')
1891 1841 if streamreqs:
1892 1842 streamreqs = set(streamreqs.split(','))
1893 1843 # if we support it, stream in and adjust our requirements
1894 1844 if not streamreqs - self.supportedformats:
1895 1845 self.stream_in(remote, streamreqs)
1896 1846
1897 1847 # internal config: ui.quietbookmarkmove
1898 1848 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1899 1849 try:
1900 1850 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1901 1851 ret = exchange.pull(self, remote, heads).cgresult
1902 1852 finally:
1903 1853 self.ui.restoreconfig(quiet)
1904 1854 return ret
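
A usage sketch, assuming a script that already has a local repo and wants a streaming clone when the server allows it; the URL and helper name are examples, and hg.peer() is the usual way to obtain a remote peer in this era of the codebase.

    from mercurial import hg

    def streamingpull(repo, url):
        remote = hg.peer(repo.ui, {}, url)
        # stream=True requests a streaming clone; clone() silently
        # falls back to a normal pull when the capability is missing
        return repo.clone(remote, stream=True)
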
1905 1855
1906 1856 def pushkey(self, namespace, key, old, new):
1907 1857 try:
1908 1858 tr = self.currenttransaction()
1909 1859 hookargs = {}
1910 1860 if tr is not None:
1911 1861 hookargs.update(tr.hookargs)
1912 1862 pending = lambda: tr.writepending() and self.root or ""
1913 1863 hookargs['pending'] = pending
1914 1864 hookargs['namespace'] = namespace
1915 1865 hookargs['key'] = key
1916 1866 hookargs['old'] = old
1917 1867 hookargs['new'] = new
1918 1868 self.hook('prepushkey', throw=True, **hookargs)
1919 1869 except error.HookAbort as exc:
1920 1870 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1921 1871 if exc.hint:
1922 1872 self.ui.write_err(_("(%s)\n") % exc.hint)
1923 1873 return False
1924 1874 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1925 1875 ret = pushkey.push(self, namespace, key, old, new)
1926 1876 def runhook():
1927 1877 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1928 1878 ret=ret)
1929 1879 self._afterlock(runhook)
1930 1880 return ret
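
A usage sketch of this pushkey entry point. The namespace and values are examples; bookmark values travel as hex node strings, with '' meaning "no previous value", and `newnode` is a hypothetical binary node obtained elsewhere.

    from mercurial.node import hex

    # Move (or create) a bookmark through the generic pushkey channel.
    ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
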
1931 1881
1932 1882 def listkeys(self, namespace):
1933 1883 self.hook('prelistkeys', throw=True, namespace=namespace)
1934 1884 self.ui.debug('listing keys for "%s"\n' % namespace)
1935 1885 values = pushkey.list(self, namespace)
1936 1886 self.hook('listkeys', namespace=namespace, values=values)
1937 1887 return values
1938 1888
1939 1889 def debugwireargs(self, one, two, three=None, four=None, five=None):
1940 1890 '''used to test argument passing over the wire'''
1941 1891 return "%s %s %s %s %s" % (one, two, three, four, five)
1942 1892
1943 1893 def savecommitmessage(self, text):
1944 1894 fp = self.vfs('last-message.txt', 'wb')
1945 1895 try:
1946 1896 fp.write(text)
1947 1897 finally:
1948 1898 fp.close()
1949 1899 return self.pathto(fp.name[len(self.root) + 1:])
1950 1900
1951 1901 # used to avoid circular references so destructors work
1952 1902 def aftertrans(files):
1953 1903 renamefiles = [tuple(t) for t in files]
1954 1904 def a():
1955 1905 for vfs, src, dest in renamefiles:
1956 1906 try:
1957 1907 vfs.rename(src, dest)
1958 1908 except OSError: # journal file does not yet exist
1959 1909 pass
1960 1910 return a
1961 1911
1962 1912 def undoname(fn):
1963 1913 base, name = os.path.split(fn)
1964 1914 assert name.startswith('journal')
1965 1915 return os.path.join(base, name.replace('journal', 'undo', 1))
1966 1916
1967 1917 def instance(ui, path, create):
1968 1918 return localrepository(ui, util.urllocalpath(path), create)
1969 1919
1970 1920 def islocal(path):
1971 1921 return True